digraph G {
  0 [labelType="html" label="<br><b>AdaptiveSparkPlan</b><br><br>"];
  subgraph cluster1 {
    isCluster="true";
    label="WholeStageCodegen (2)\n \nduration: 4 ms";
    2 [labelType="html" label="<br><b>Project</b><br><br>"];
    3 [labelType="html" label="<b>HashAggregate</b><br><br>spill size: 0.0 B<br>time in aggregation build: 0 ms<br>peak memory: 256.0 KiB<br>number of output rows: 1<br>number of sort fallback tasks: 0<br>avg hash probes per key: 0"];
  }
  4 [labelType="html" label="<b>AQEShuffleRead</b><br><br>number of partitions: 1<br>partition data size: 60.0 B<br>number of coalesced partitions: 1"];
  5 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 1<br>local merged chunks fetched: 0<br>shuffle write time total (min, med, max (stageId: taskId))<br>2 ms (0 ms, 2 ms, 2 ms (stage 30.0: task 30))<br>remote merged bytes read: 0.0 B<br>local merged blocks fetched: 0<br>corrupt merged block chunks: 0<br>remote merged reqs duration: 0 ms<br>remote merged blocks fetched: 0<br>records read: 1<br>local bytes read: 60.0 B<br>fetch wait time: 0 ms<br>remote bytes read: 0.0 B<br>merged fetch fallback count: 0<br>local blocks read: 1<br>remote merged chunks fetched: 0<br>remote blocks read: 0<br>data size total (min, med, max (stageId: taskId))<br>24.0 B (0.0 B, 24.0 B, 24.0 B (stage 30.0: task 30))<br>local merged bytes read: 0.0 B<br>number of partitions: 200<br>remote reqs duration: 0 ms<br>remote bytes read to disk: 0.0 B<br>shuffle bytes written total (min, med, max (stageId: taskId))<br>60.0 B (0.0 B, 60.0 B, 60.0 B (stage 30.0: task 30))"];
  subgraph cluster6 {
    isCluster="true";
    label="WholeStageCodegen (1)\n \nduration: total (min, med, max (stageId: taskId))\n47 ms (0 ms, 47 ms, 47 ms (stage 30.0: task 30))";
    7 [labelType="html" label="<b>HashAggregate</b><br><br>spill size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 30.0: task 30))<br>time in aggregation build total (min, med, max (stageId: taskId))<br>4 ms (0 ms, 4 ms, 4 ms (stage 30.0: task 30))<br>peak memory total (min, med, max (stageId: taskId))<br>256.0 KiB (0.0 B, 256.0 KiB, 256.0 KiB (stage 30.0: task 30))<br>number of output rows: 1<br>number of sort fallback tasks: 0<br>avg hash probes per key: 0"];
    8 [labelType="html" label="<br><b>Project</b><br><br>"];
  }
  9 [labelType="html" label="<b>Scan hive spark_catalog.default.alltypes</b><br><br>number of output rows: 1"];
  2 -> 0;
  3 -> 2;
  4 -> 3;
  5 -> 4;
  7 -> 5;
  8 -> 7;
  9 -> 8;
}
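The graph above is the Graphviz DOT source behind the Spark UI's SQL-tab plan diagram; the labelType="html" and isCluster attributes are consumed by the UI's dagre-d3 renderer rather than by Graphviz itself, which tolerates them as unknown attributes. A minimal sketch of rendering it offline, assuming the source is saved as plan.dot (a hypothetical file name) and the Graphviz dot binary is on the PATH:

  import scala.sys.process._

  // Render the DOT source above to SVG via the Graphviz CLI.
  // plan.dot is an assumed file name; plain Graphviz ignores the
  // dagre-d3-specific attributes but preserves the graph structure.
  val exitCode = "dot -Tsvg plan.dot -o plan.svg".!
  assert(exitCode == 0, s"dot exited with code $exitCode")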
AdaptiveSparkPlan isFinalPlan=true
+- WholeStageCodegen (2)
   +- Project [BOOLEAN#915]
      +- HashAggregate(keys=[BOOLEAN#915, C1#905], functions=[])
         +- AQEShuffleRead coalesced
            +- Exchange hashpartitioning(BOOLEAN#915, C1#905, 200), ENSURE_REQUIREMENTS, [plan_id=780]
               +- WholeStageCodegen (1)
                  +- HashAggregate(keys=[BOOLEAN#915, C1#905], functions=[])
                     +- Project [BOOLEAN#915, BOOLEAN#915 AS C1#905]
                        +- Scan hive spark_catalog.default.alltypes [BOOLEAN#915], HiveTableRelation [`spark_catalog`.`default`.`alltypes`, org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, Data Cols: [STRING#908, DOUBLE#909, INTEGER#910, BIGINT#911L, FLOAT#912, DECIMAL#913, NUMBER#914, BOOLEAN#91..., Partition Cols: []]
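Worth noting in this final plan: the Exchange still hash-partitions its output 200 ways, but the runtime statistics (one record, 60.0 B) let adaptive execution coalesce the read side into a single partition, which is what the AQEShuffleRead node's "number of coalesced partitions: 1" in the graph above reports. A minimal sketch of the (real) configuration keys that govern this behavior; the session setup and values are illustrative, not taken from this run:

  import org.apache.spark.sql.SparkSession

  // Illustrative local session; in spark-shell a `spark` session
  // already exists and the keys can be set the same way.
  val spark = SparkSession.builder()
    .appName("aqe-coalesce-demo")
    .master("local[*]")
    .getOrCreate()

  // AQE must be enabled for AdaptiveSparkPlan/AQEShuffleRead to appear
  // at all (it is on by default since Spark 3.2).
  spark.conf.set("spark.sql.adaptive.enabled", "true")

  // Coalescing of small post-shuffle partitions: with only 60 B of
  // shuffle output, all 200 hash partitions collapse into one read.
  spark.conf.set("spark.sql.adaptive.coalescePartitions.enabled", "true")

  // Target size per coalesced partition (64MB is the default).
  spark.conf.set("spark.sql.adaptive.advisoryPartitionSizeInBytes", "64MB")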
== Physical Plan ==
AdaptiveSparkPlan (14)
+- == Final Plan ==
   * Project (8)
   +- * HashAggregate (7)
      +- AQEShuffleRead (6)
         +- ShuffleQueryStage (5), Statistics(sizeInBytes=24.0 B, rowCount=1)
            +- Exchange (4)
               +- * HashAggregate (3)
                  +- * Project (2)
                     +- Scan hive spark_catalog.default.alltypes (1)
+- == Initial Plan ==
   TakeOrderedAndProject (13)
   +- HashAggregate (12)
      +- Exchange (11)
         +- HashAggregate (10)
            +- Project (9)
               +- Scan hive spark_catalog.default.alltypes (1)

(1) Scan hive spark_catalog.default.alltypes
Output [1]: [BOOLEAN#915]
Arguments: [BOOLEAN#915], HiveTableRelation [`spark_catalog`.`default`.`alltypes`, org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, Data Cols: [STRING#908, DOUBLE#909, INTEGER#910, BIGINT#911L, FLOAT#912, DECIMAL#913, NUMBER#914, BOOLEAN#91..., Partition Cols: []]

(2) Project [codegen id : 1]
Output [2]: [BOOLEAN#915, BOOLEAN#915 AS C1#905]
Input [1]: [BOOLEAN#915]

(3) HashAggregate [codegen id : 1]
Input [2]: [BOOLEAN#915, C1#905]
Keys [2]: [BOOLEAN#915, C1#905]
Functions: []
Aggregate Attributes: []
Results [2]: [BOOLEAN#915, C1#905]

(4) Exchange
Input [2]: [BOOLEAN#915, C1#905]
Arguments: hashpartitioning(BOOLEAN#915, C1#905, 200), ENSURE_REQUIREMENTS, [plan_id=780]

(5) ShuffleQueryStage
Output [2]: [BOOLEAN#915, C1#905]
Arguments: 0

(6) AQEShuffleRead
Input [2]: [BOOLEAN#915, C1#905]
Arguments: coalesced

(7) HashAggregate [codegen id : 2]
Input [2]: [BOOLEAN#915, C1#905]
Keys [2]: [BOOLEAN#915, C1#905]
Functions: []
Aggregate Attributes: []
Results [3]: [BOOLEAN#915, CASE WHEN isnotnull(C1#905) THEN cast(C1#905 as int) ELSE 0 END AS C2#906, CASE WHEN isnull(C1#905) THEN 0 ELSE 1 END AS C3#907]

(8) Project [codegen id : 2]
Output [1]: [BOOLEAN#915]
Input [3]: [BOOLEAN#915, C2#906, C3#907]

(9) Project
Output [2]: [BOOLEAN#915, BOOLEAN#915 AS C1#905]
Input [1]: [BOOLEAN#915]

(10) HashAggregate
Input [2]: [BOOLEAN#915, C1#905]
Keys [2]: [BOOLEAN#915, C1#905]
Functions: []
Aggregate Attributes: []
Results [2]: [BOOLEAN#915, C1#905]

(11) Exchange
Input [2]: [BOOLEAN#915, C1#905]
Arguments: hashpartitioning(BOOLEAN#915, C1#905, 200), ENSURE_REQUIREMENTS, [plan_id=766]

(12) HashAggregate
Input [2]: [BOOLEAN#915, C1#905]
Keys [2]: [BOOLEAN#915, C1#905]
Functions: []
Aggregate Attributes: []
Results [3]: [BOOLEAN#915, CASE WHEN isnotnull(C1#905) THEN cast(C1#905 as int) ELSE 0 END AS C2#906, CASE WHEN isnull(C1#905) THEN 0 ELSE 1 END AS C3#907]

(13) TakeOrderedAndProject
Input [3]: [BOOLEAN#915, C2#906, C3#907]
Arguments: 501, [C2#906 ASC NULLS FIRST, C3#907 ASC NULLS FIRST], [BOOLEAN#915]

(14) AdaptiveSparkPlan
Output [1]: [BOOLEAN#915]
Arguments: isFinalPlan=true
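
For completeness: the listing above is the layout printed by Dataset.explain("formatted"), with the numbered operator tree first and one detail block per node. A minimal sketch that would produce output of this shape, assuming a Hive-enabled session; the DISTINCT query is a hypothetical reconstruction (a two-phase HashAggregate with functions=[] is what a DISTINCT compiles to), since the original SQL is not part of this dump:

  import org.apache.spark.sql.SparkSession

  // Hive support is required because the leaf scan is a HiveTableRelation.
  val spark = SparkSession.builder()
    .appName("explain-formatted-demo")
    .master("local[*]")
    .enableHiveSupport()
    .getOrCreate()

  // Hypothetical reconstruction of the query behind the plan above.
  val df = spark.sql(
    """SELECT DISTINCT `BOOLEAN`, `BOOLEAN` AS C1
      |FROM spark_catalog.default.alltypes""".stripMargin)

  // Prints "== Physical Plan ==", the numbered tree, then the
  // per-node "(n) Operator" detail blocks, as shown above.
  df.explain("formatted")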