#include "duckdb/common/tree_renderer.hpp"
#include "duckdb/common/types/column/column_data_collection.hpp"
#include "duckdb/execution/operator/helper/physical_explain_analyze.hpp"
#include "duckdb/execution/operator/scan/physical_column_data_scan.hpp"
#include "duckdb/execution/physical_plan_generator.hpp"
#include "duckdb/main/client_context.hpp"
#include "duckdb/planner/operator/logical_explain.hpp"

namespace duckdb {

unique_ptr<PhysicalOperator> PhysicalPlanGenerator::CreatePlan(LogicalExplain &op) {
	D_ASSERT(op.children.size() == 1);
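	// render the optimized logical plan to a string first; physical planning below may modify the logical tree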
	auto logical_plan_opt = op.children[0]->ToString();
	auto plan = CreatePlan(*op.children[0]);
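	// for EXPLAIN ANALYZE, wrap the child plan in a PhysicalExplainAnalyze operator,
	// which executes the query and outputs the plan annotated with runtime statistics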
	if (op.explain_type == ExplainType::EXPLAIN_ANALYZE) {
		auto result = make_uniq<PhysicalExplainAnalyze>(op.types);
		result->children.push_back(std::move(plan));
		return std::move(result);
	}

	op.physical_plan = plan->ToString();
	// select which plans to output based on the configured explain output type
	vector<string> keys, values;
	switch (ClientConfig::GetConfig(context).explain_output_type) {
	case ExplainOutputType::OPTIMIZED_ONLY:
		keys = {"logical_opt"};
		values = {logical_plan_opt};
		break;
	case ExplainOutputType::PHYSICAL_ONLY:
		keys = {"physical_plan"};
		values = {op.physical_plan};
		break;
	default:
		keys = {"logical_plan", "logical_opt", "physical_plan"};
		values = {op.logical_plan_unopt, logical_plan_opt, op.physical_plan};
	}

	// create a ColumnDataCollection from the output
	auto &allocator = Allocator::Get(context);
	vector<LogicalType> plan_types {LogicalType::VARCHAR, LogicalType::VARCHAR};
	auto collection =
	    make_uniq<ColumnDataCollection>(context, plan_types, ColumnDataAllocatorType::IN_MEMORY_ALLOCATOR);

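	// fill a DataChunk with one (key, value) row per rendered plan and append it to the collection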
	DataChunk chunk;
	chunk.Initialize(allocator, op.types);
	for (idx_t i = 0; i < keys.size(); i++) {
		chunk.SetValue(0, chunk.size(), Value(keys[i]));
		chunk.SetValue(1, chunk.size(), Value(values[i]));
		chunk.SetCardinality(chunk.size() + 1);
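		// flush the chunk into the collection once it holds a full vector of rows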
		if (chunk.size() == STANDARD_VECTOR_SIZE) {
			collection->Append(chunk);
			chunk.Reset();
		}
	}
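	// append whatever remains in the last, possibly partially filled, chunk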
	collection->Append(chunk);

	// create a chunk scan to output the result
	auto chunk_scan = make_uniq<PhysicalColumnDataScan>(op.types, PhysicalOperatorType::COLUMN_DATA_SCAN,
	                                                    op.estimated_cardinality, std::move(collection));
	return std::move(chunk_scan);
}

} // namespace duckdb