// Graphviz DOT rendering of a Spark SQL physical-plan DAG.
// NOTE(review): appears to be generated by the Spark UI — labels are HTML
// fragments (labelType="html") and metric text is embedded per operator;
// confirm before hand-editing, since a renderer likely consumes this verbatim.
// Edges point from child operator to parent, so data flows from the Parquet
// scan (node 10) up to the AdaptiveSparkPlan root (node 0).
digraph G {
// Node 0: plan root (AdaptiveSparkPlan).
0 [labelType="html" label="<br><b>AdaptiveSparkPlan</b><br><br>"];
// Cluster for WholeStageCodegen stage (2): the post-shuffle (final)
// aggregation, fused into a single generated function.
subgraph cluster1 {
isCluster="true";
label="WholeStageCodegen (2)\n \nduration: 3 ms";
// Node 2: final HashAggregate, with its runtime metrics in the label.
2 [labelType="html" label="<b>HashAggregate</b><br><br>spill size: 0.0 B<br>time in aggregation build: 0 ms<br>peak memory: 256.0 KiB<br>number of output rows: 1<br>number of sort fallback tasks: 0<br>avg hash probes per key: 0"];
}
// Node 3: AQE shuffle read — coalesced the 200 shuffle partitions down to 1.
3 [labelType="html" label="<b>AQEShuffleRead</b><br><br>number of partitions: 1<br>partition data size: 88.0 B<br>number of coalesced partitions: 1"];
// Node 4: shuffle Exchange (200 partitions) with detailed read/write metrics,
// including total (min, med, max) breakdowns per stage/task.
4 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 1<br>local merged chunks fetched: 0<br>shuffle write time total (min, med, max (stageId: taskId))<br>3 ms (0 ms, 0 ms, 3 ms (stage 21.0: task 21))<br>remote merged bytes read: 0.0 B<br>local merged blocks fetched: 0<br>corrupt merged block chunks: 0<br>remote merged reqs duration: 0 ms<br>remote merged blocks fetched: 0<br>records read: 1<br>local bytes read: 81.0 B<br>fetch wait time: 0 ms<br>remote bytes read: 0.0 B<br>merged fetch fallback count: 0<br>local blocks read: 1<br>remote merged chunks fetched: 0<br>remote blocks read: 0<br>data size total (min, med, max (stageId: taskId))<br>40.0 B (0.0 B, 0.0 B, 40.0 B (stage 21.0: task 21))<br>local merged bytes read: 0.0 B<br>number of partitions: 200<br>remote reqs duration: 0 ms<br>remote bytes read to disk: 0.0 B<br>shuffle bytes written total (min, med, max (stageId: taskId))<br>81.0 B (0.0 B, 0.0 B, 81.0 B (stage 21.0: task 21))"];
// Cluster for WholeStageCodegen stage (1): the pre-shuffle pipeline
// (partial aggregate <- project <- filter <- columnar-to-row).
subgraph cluster5 {
isCluster="true";
label="WholeStageCodegen (1)\n \nduration: total (min, med, max (stageId: taskId))\n114 ms (0 ms, 47 ms, 67 ms (stage 21.0: task 21))";
// Node 6: partial HashAggregate feeding the Exchange.
6 [labelType="html" label="<b>HashAggregate</b><br><br>spill size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 21.0: task 22))<br>time in aggregation build total (min, med, max (stageId: taskId))<br>86 ms (0 ms, 41 ms, 45 ms (stage 21.0: task 21))<br>peak memory total (min, med, max (stageId: taskId))<br>512.0 KiB (0.0 B, 256.0 KiB, 256.0 KiB (stage 21.0: task 22))<br>number of output rows: 1<br>number of sort fallback tasks: 0<br>avg hash probes per key: 0"];
// Node 7: Project (column pruning; no metrics recorded).
7 [labelType="html" label="<br><b>Project</b><br><br>"];
// Node 8: Filter — reduced 26,236 scanned rows to 1.
8 [labelType="html" label="<b>Filter</b><br><br>number of output rows: 1"];
// Node 9: ColumnarToRow — converts 8 Parquet column batches to rows.
9 [labelType="html" label="<b>ColumnarToRow</b><br><br>number of output rows: 26,236<br>number of input batches: 8"];
}
// Node 10: Parquet file scan of spark_catalog.default.airports (2 files).
10 [labelType="html" label="<b>Scan parquet spark_catalog.default.airports</b><br><br>number of files read: 2<br>scan time total (min, med, max (stageId: taskId))<br>79 ms (0 ms, 39 ms, 40 ms (stage 21.0: task 22))<br>metadata time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 21.0: task 22))<br>size of files read total (min, med, max (stageId: taskId))<br>2.1 MiB (0.0 B, 0.0 B, 2.1 MiB (driver))<br>number of output rows: 26,236"];
// Edges (child -> parent): scan 10 -> ... -> Exchange 4 -> AQE read 3
// -> final aggregate 2 -> root 0.
2->0;
3->2;
4->3;
6->4;
7->6;
8->7;
9->8;
10->9;
}
11
AdaptiveSparkPlan isFinalPlan=true
HashAggregate(keys=[name#182], functions=[])
WholeStageCodegen (2)
AQEShuffleRead coalesced
Exchange hashpartitioning(name#182, 200), ENSURE_REQUIREMENTS, [plan_id=419]
HashAggregate(keys=[name#182], functions=[])
Project [name#182]
Filter (isnotnull(code#191) AND (code#191 = 0GA2))
ColumnarToRow
WholeStageCodegen (1)
FileScan parquet spark_catalog.default.airports[name#182,code#191] Batched: true, DataFilters: [isnotnull(code#191), (code#191 = 0GA2)], Format: Parquet, Location: InMemoryFileIndex(1 paths)[file:/home/acdcadmin/spark-warehouse/airports], PartitionFilters: [], PushedFilters: [IsNotNull(code), EqualTo(code,0GA2)], ReadSchema: struct<name:string,code:string>