digraph G {
0 [labelType="html" label="<br><b>AdaptiveSparkPlan</b><br><br>"];
1 [labelType="html" label="<br><b>TakeOrderedAndProject</b><br><br>"];
subgraph cluster2 {
isCluster="true";
label="WholeStageCodegen (2)\n \nduration: 17 ms";
3 [labelType="html" label="<b>HashAggregate</b><br><br>spill size: 0.0 B<br>time in aggregation build: 7 ms<br>peak memory: 4.2 MiB<br>number of output rows: 8,281<br>number of sort fallback tasks: 0<br>avg hash probes per key: 1"];
}
4 [labelType="html" label="<b>AQEShuffleRead</b><br><br>number of partitions: 1<br>partition data size: 82.6 KiB<br>number of coalesced partitions: 1"];
5 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 8,308<br>local merged chunks fetched: 0<br>shuffle write time total (min, med, max (stageId: taskId))<br>74 ms (0 ms, 28 ms, 46 ms (stage 3.0: task 3))<br>remote merged bytes read: 0.0 B<br>local merged blocks fetched: 0<br>corrupt merged block chunks: 0<br>remote merged reqs duration: 0 ms<br>remote merged blocks fetched: 0<br>records read: 8,308<br>local bytes read: 78.8 KiB<br>fetch wait time: 0 ms<br>remote bytes read: 0.0 B<br>merged fetch fallback count: 0<br>local blocks read: 2<br>remote merged chunks fetched: 0<br>remote blocks read: 0<br>data size total (min, med, max (stageId: taskId))<br>194.7 KiB (0.0 B, 85.4 KiB, 109.3 KiB (stage 3.0: task 4))<br>local merged bytes read: 0.0 B<br>number of partitions: 200<br>remote reqs duration: 0 ms<br>remote bytes read to disk: 0.0 B<br>shuffle bytes written total (min, med, max (stageId: taskId))<br>78.8 KiB (0.0 B, 36.6 KiB, 42.3 KiB (stage 3.0: task 4))"];
subgraph cluster6 {
isCluster="true";
label="WholeStageCodegen (1)\n \nduration: total (min, med, max (stageId: taskId))\n117 ms (0 ms, 58 ms, 59 ms (stage 3.0: task 4))";
7 [labelType="html" label="<b>HashAggregate</b><br><br>spill size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 3.0: task 4))<br>time in aggregation build total (min, med, max (stageId: taskId))<br>41 ms (0 ms, 18 ms, 23 ms (stage 3.0: task 4))<br>peak memory total (min, med, max (stageId: taskId))<br>8.5 MiB (0.0 B, 4.2 MiB, 4.2 MiB (stage 3.0: task 4))<br>number of output rows: 8,308<br>number of sort fallback tasks: 0<br>avg hash probes per key (min, med, max (stageId: taskId)):<br>(1, 1, 1 (stage 3.0: task 4))"];
8 [labelType="html" label="<b>ColumnarToRow</b><br><br>number of output rows: 46,236<br>number of input batches: 13"];
}
9 [labelType="html" label="<b>Scan parquet spark_catalog.default.airports</b><br><br>number of files read: 2<br>scan time total (min, med, max (stageId: taskId))<br>32 ms (0 ms, 10 ms, 22 ms (stage 3.0: task 4))<br>metadata time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 3.0: task 4))<br>size of files read total (min, med, max (stageId: taskId))<br>2.1 MiB (0.0 B, 0.0 B, 2.1 MiB (driver))<br>number of output rows: 46,236"];
1->0;
3->1;
4->3;
5->4;
7->5;
8->7;
9->8;
}
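The digraph above is the DOT source behind the Spark SQL UI's plan visualization for this execution, with the per-node metrics baked into the HTML labels. As a minimal sketch (assuming the DOT text has been saved locally as plan.dot and the Graphviz dot binary is on the PATH; the file name and setup are assumptions, not from the source), it can be rendered to an SVG from Scala:

import scala.sys.process._

// Assumption: the DOT source above was saved as plan.dot and Graphviz is installed.
// Renders the plan graph to plan.svg so the per-node metrics can be inspected offline.
val exitCode = "dot -Tsvg plan.dot -o plan.svg".!
require(exitCode == 0, s"Graphviz returned exit code $exitCode")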
AdaptiveSparkPlan isFinalPlan=true
+- TakeOrderedAndProject(limit=501, orderBy=[code#114 ASC NULLS FIRST], output=[code#114])
   +- WholeStageCodegen (2)
      +- HashAggregate(keys=[code#114], functions=[])
         +- AQEShuffleRead coalesced
            +- Exchange hashpartitioning(code#114, 200), ENSURE_REQUIREMENTS, [plan_id=115]
               +- WholeStageCodegen (1)
                  +- HashAggregate(keys=[code#114], functions=[])
                     +- ColumnarToRow
                        +- FileScan parquet spark_catalog.default.airports[code#114] Batched: true, DataFilters: [], Format: Parquet, Location: InMemoryFileIndex(1 paths)[file:/home/acdcadmin/spark-warehouse/airports], PartitionFilters: [], PushedFilters: [], ReadSchema: struct<code:string>
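This plan shape (a partial and a final HashAggregate around an Exchange, a coalesced AQEShuffleRead, and a TakeOrderedAndProject under an AdaptiveSparkPlan root) is what Spark typically produces for a distinct-order-limit over a single column. The snippet below is a hedged reconstruction, not the original query: the airports table, the code column, and the limit of 501 are taken from the plan, while the session setup and the exact API calls are assumptions.

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder()
  .appName("airports-plan-sketch")
  .getOrCreate()

// spark.sql.adaptive.enabled is on by default in Spark 3.2+, which is why the root
// node is AdaptiveSparkPlan; spark.sql.adaptive.coalescePartitions.enabled is what
// lets AQEShuffleRead collapse the 200 shuffle partitions down to a single one.
val codes = spark.table("airports")
  .select("code")
  .distinct()        // planned as partial + final HashAggregate around the Exchange
  .orderBy("code")   // folded together with the limit into TakeOrderedAndProject
  .limit(501)

codes.explain()      // prints the physical plan; isFinalPlan flips to true only after execution
codes.collect()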