------------------------------------------------------------------------
User:            jonathon
Job IDs:         [32]
Group ID:        03d4d9e8-6a2d-4d2d-a253-63696f36868f
Start Time:      2025/06/13 06:57:07
Finish Time:     2025/06/13 06:57:08
Close Time:      2025/06/13 06:57:08
Execution Time:  307 ms
Duration:        403 ms
Statement:
SELECT C_4 AS C_17, C_6 AS C_13, C_7 AS C_18, C_4331 AS C_21, C_4332 AS C_16, C_4333 AS C_14, C_0 AS C_12, C_8 AS C_23, C_1 AS C_15, C_9 AS C_20, C_2 AS C_22, C_10 AS C_25, C_11 AS C_24, C_5 AS C_19 FROM (SELECT C_64656661756c745f616972706f727473.`id` AS C_4, C_64656661756c745f616972706f727473.`type` AS C_6, C_64656661756c745f616972706f727473.`name` AS C_7, C_64656661756c745f616972706f727473.`lat` AS C_43, C_64656661756c745f616972706f727473.`lon` AS C_3, C_64656661756c745f616972706f727473.`elev` AS C_5, C_64656661756c745f616972706f727473.`continent` AS C_0, C_64656661756c745f616972706f727473.`country` AS C_8, C_64656661756c745f616972706f727473.`region` AS C_1, C_64656661756c745f616972706f727473.`city` AS C_9, C_64656661756c745f616972706f727473.`iata` AS C_2, C_64656661756c745f616972706f727473.`code` AS C_10, C_64656661756c745f616972706f727473.`gps` AS C_11, (round((C_64656661756c745f616972706f727473.`lat` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4331, (round((C_64656661756c745f616972706f727473.`lon` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4332, (round((C_64656661756c745f616972706f727473.`elev` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4333 FROM `default`.`airports` C_64656661756c745f616972706f727473 WHERE ((C_64656661756c745f616972706f727473.`lon` <= (- 1.040500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lon` >= (- 1.110500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lat` >= 4.100000000000000E+001) AND (C_64656661756c745f616972706f727473.`lat` <= 4.500000000000000E+001)) ) C_4954424c ORDER BY C_19 DESC LIMIT 5
State:           CLOSED
Detail:
== Parsed Logical Plan ==
'GlobalLimit 5
+- 'LocalLimit 5
+- 'Sort ['C_19 DESC NULLS LAST], true
+- 'Project ['C_4 AS C_17#1976, 'C_6 AS C_13#1977, 'C_7 AS C_18#1978, 'C_4331 AS C_21#1979, 'C_4332 AS C_16#1980, 'C_4333 AS C_14#1981, 'C_0 AS C_12#1982, 'C_8 AS C_23#1983, 'C_1 AS C_15#1984, 'C_9 AS C_20#1985, 'C_2 AS C_22#1986, 'C_10 AS C_25#1987, 'C_11 AS C_24#1988, 'C_5 AS C_19#1989]
+- 'SubqueryAlias C_4954424c
+- 'Project ['C_64656661756c745f616972706f727473.id AS C_4#1960, 'C_64656661756c745f616972706f727473.type AS C_6#1961, 'C_64656661756c745f616972706f727473.name AS C_7#1962, 'C_64656661756c745f616972706f727473.lat AS C_43#1963, 'C_64656661756c745f616972706f727473.lon AS C_3#1964, 'C_64656661756c745f616972706f727473.elev AS C_5#1965, 'C_64656661756c745f616972706f727473.continent AS C_0#1966, 'C_64656661756c745f616972706f727473.country AS C_8#1967, 'C_64656661756c745f616972706f727473.region AS C_1#1968, 'C_64656661756c745f616972706f727473.city AS C_9#1969, 'C_64656661756c745f616972706f727473.iata AS C_2#1970, 'C_64656661756c745f616972706f727473.code AS C_10#1971, 'C_64656661756c745f616972706f727473.gps AS C_11#1972, ('round(('C_64656661756c745f616972706f727473.lat * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4331#1973, ('round(('C_64656661756c745f616972706f727473.lon * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4332#1974, ('round(('C_64656661756c745f616972706f727473.elev * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4333#1975]
+- 'Filter ((('C_64656661756c745f616972706f727473.lon <= -104.05) AND ('C_64656661756c745f616972706f727473.lon >= -111.05)) AND (('C_64656661756c745f616972706f727473.lat >= 41.0) AND ('C_64656661756c745f616972706f727473.lat <= 45.0)))
+- 'SubqueryAlias C_64656661756c745f616972706f727473
+- 'UnresolvedRelation [default, airports], [], false
== Analyzed Logical Plan ==
C_17: string, C_13: string, C_18: string, C_21: double, C_16: double, C_14: double, C_12: string, C_23: string, C_15: string, C_20: string, C_22: string, C_25: string, C_24: string, C_19: double
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_19#1989 DESC NULLS LAST], true
+- Project [C_4#1960 AS C_17#1976, C_6#1961 AS C_13#1977, C_7#1962 AS C_18#1978, C_4331#1973 AS C_21#1979, C_4332#1974 AS C_16#1980, C_4333#1975 AS C_14#1981, C_0#1966 AS C_12#1982, C_8#1967 AS C_23#1983, C_1#1968 AS C_15#1984, C_9#1969 AS C_20#1985, C_2#1970 AS C_22#1986, C_10#1971 AS C_25#1987, C_11#1972 AS C_24#1988, C_5#1965 AS C_19#1989]
+- SubqueryAlias C_4954424c
+- Project [id#1990 AS C_4#1960, type#1991 AS C_6#1961, name#1992 AS C_7#1962, lat#1993 AS C_43#1963, lon#1994 AS C_3#1964, elev#1995 AS C_5#1965, continent#1996 AS C_0#1966, country#1997 AS C_8#1967, region#1998 AS C_1#1968, city#1999 AS C_9#1969, iata#2000 AS C_2#1970, code#2001 AS C_10#1971, gps#2002 AS C_11#1972, (round((lat#1993 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4331#1973, (round((lon#1994 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4332#1974, (round((elev#1995 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4333#1975]
+- Filter (((lon#1994 <= -104.05) AND (lon#1994 >= -111.05)) AND ((lat#1993 >= 41.0) AND (lat#1993 <= 45.0)))
+- SubqueryAlias C_64656661756c745f616972706f727473
+- SubqueryAlias spark_catalog.default.airports
+- Relation spark_catalog.default.airports[id#1990,type#1991,name#1992,lat#1993,lon#1994,elev#1995,continent#1996,country#1997,region#1998,city#1999,iata#2000,code#2001,gps#2002] parquet
== Optimized Logical Plan ==
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_19#1989 DESC NULLS LAST], true
+- Project [id#1990 AS C_17#1976, type#1991 AS C_13#1977, name#1992 AS C_18#1978, (round((lat#1993 * 1000.0), 0) / 1000.0) AS C_21#1979, (round((lon#1994 * 1000.0), 0) / 1000.0) AS C_16#1980, (round((elev#1995 * 1000.0), 0) / 1000.0) AS C_14#1981, continent#1996 AS C_12#1982, country#1997 AS C_23#1983, region#1998 AS C_15#1984, city#1999 AS C_20#1985, iata#2000 AS C_22#1986, code#2001 AS C_25#1987, gps#2002 AS C_24#1988, elev#1995 AS C_19#1989]
+- Filter ((isnotnull(lon#1994) AND isnotnull(lat#1993)) AND (((lon#1994 <= -104.05) AND (lon#1994 >= -111.05)) AND ((lat#1993 >= 41.0) AND (lat#1993 <= 45.0))))
+- Relation spark_catalog.default.airports[id#1990,type#1991,name#1992,lat#1993,lon#1994,elev#1995,continent#1996,country#1997,region#1998,city#1999,iata#2000,code#2001,gps#2002] parquet
== Physical Plan ==
TakeOrderedAndProject(limit=5, orderBy=[C_19#1989 DESC NULLS LAST], output=[C_17#1976,C_13#1977,C_18#1978,C_21#1979,C_16#1980,C_14#1981,C_12#1982,C_23#1983,C_15#1984,C_20#1985,C_22#1986,C_25#1987,C_24#1988,C_19#1989])
+- *(1) Project [id#1990 AS C_17#1976, type#1991 AS C_13#1977, name#1992 AS C_18#1978, (round((lat#1993 * 1000.0), 0) / 1000.0) AS C_21#1979, (round((lon#1994 * 1000.0), 0) / 1000.0) AS C_16#1980, (round((elev#1995 * 1000.0), 0) / 1000.0) AS C_14#1981, continent#1996 AS C_12#1982, country#1997 AS C_23#1983, region#1998 AS C_15#1984, city#1999 AS C_20#1985, iata#2000 AS C_22#1986, code#2001 AS C_25#1987, gps#2002 AS C_24#1988, elev#1995 AS C_19#1989]
+- *(1) Filter (((((isnotnull(lon#1994) AND isnotnull(lat#1993)) AND (lon#1994 <= -104.05)) AND (lon#1994 >= -111.05)) AND (lat#1993 >= 41.0)) AND (lat#1993 <= 45.0))
+- *(1) ColumnarToRow
+- FileScan parquet spark_catalog.default.airports[id#1990,type#1991,name#1992,lat#1993,lon#1994,elev#1995,continent#1996,country#1997,region#1998,city#1999,iata#2000,code#2001,gps#2002] Batched: true, DataFilters: [isnotnull(lon#1994), isnotnull(lat#1993), (lon#1994 <= -104.05), (lon#1994 >= -111.05), (lat#199..., Format: Parquet, Location: InMemoryFileIndex(1 paths)[file:/home/acdcadmin/spark-warehouse/airports], PartitionFilters: [], PushedFilters: [IsNotNull(lon), IsNotNull(lat), LessThanOrEqual(lon,-104.05), GreaterThanOrEqual(lon,-111.05), G..., ReadSchema: struct<id:string,type:string,name:string,lat:double,lon:double,elev:double,continent:string,count...
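
Note: the client emits obfuscated SQL. The table alias is hex-encoded ASCII (64656661756c745f616972706f727473 decodes to default_airports, and the subquery alias 4954424c to ITBL), round(x * power(10, 3), 0) / power(10, 3) rounds to three decimal places, and the WHERE clause is a lat/lon bounding box that matches Wyoming's borders. Since C_19 aliases elev, the statement returns the five highest-elevation airports in that box. A hand-written equivalent (a sketch; the generated alias names are dropped):

  -- decode the hex-encoded identifiers (Spark SQL)
  SELECT cast(unhex('64656661756c745f616972706f727473') AS STRING);  -- default_airports
  SELECT cast(unhex('4954424c') AS STRING);                          -- ITBL

  -- readable equivalent of the generated statement
  SELECT id, type, name,
         round(lat  * 1000) / 1000 AS lat_3,
         round(lon  * 1000) / 1000 AS lon_3,
         round(elev * 1000) / 1000 AS elev_3,
         continent, country, region, city, iata, code, gps, elev
  FROM default.airports
  WHERE lon BETWEEN -111.05 AND -104.05
    AND lat BETWEEN 41.0 AND 45.0
  ORDER BY elev DESC
  LIMIT 5;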

------------------------------------------------------------------------
User:            jonathon
Job IDs:         [48]
Group ID:        04f41b96-5df1-4fc1-93b2-986be81617a4
Start Time:      2025/06/14 06:31:59
Finish Time:     2025/06/14 06:31:59
Close Time:      2025/06/14 06:31:59
Execution Time:  238 ms
Duration:        335 ms
Statement:
SELECT C_10 AS C_13, C_43 AS C_18, C_4 AS C_23, C_4331 AS C_16, C_4332 AS C_21, C_4333 AS C_22, C_7 AS C_20, C_2 AS C_15, C_5 AS C_25, C_8 AS C_12, C_9 AS C_24, C_3 AS C_14, C_11 AS C_19, C_1 AS C_17 FROM (SELECT C_64656661756c745f616972706f727473.`id` AS C_10, C_64656661756c745f616972706f727473.`type` AS C_43, C_64656661756c745f616972706f727473.`name` AS C_4, C_64656661756c745f616972706f727473.`lat` AS C_0, C_64656661756c745f616972706f727473.`lon` AS C_6, C_64656661756c745f616972706f727473.`elev` AS C_1, C_64656661756c745f616972706f727473.`continent` AS C_7, C_64656661756c745f616972706f727473.`country` AS C_2, C_64656661756c745f616972706f727473.`region` AS C_5, C_64656661756c745f616972706f727473.`city` AS C_8, C_64656661756c745f616972706f727473.`iata` AS C_9, C_64656661756c745f616972706f727473.`code` AS C_3, C_64656661756c745f616972706f727473.`gps` AS C_11, (round((C_64656661756c745f616972706f727473.`lat` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4331, (round((C_64656661756c745f616972706f727473.`lon` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4332, (round((C_64656661756c745f616972706f727473.`elev` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4333 FROM `default`.`airports` C_64656661756c745f616972706f727473 WHERE ((C_64656661756c745f616972706f727473.`lon` <= (- 1.040500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lon` >= (- 1.110500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lat` >= 4.100000000000000E+001) AND (C_64656661756c745f616972706f727473.`lat` <= 4.500000000000000E+001)) ) C_4954424c ORDER BY C_17 DESC LIMIT 5
State:           CLOSED
Detail:
== Parsed Logical Plan ==
'GlobalLimit 5
+- 'LocalLimit 5
+- 'Sort ['C_17 DESC NULLS LAST], true
+- 'Project ['C_10 AS C_13#5222, 'C_43 AS C_18#5223, 'C_4 AS C_23#5224, 'C_4331 AS C_16#5225, 'C_4332 AS C_21#5226, 'C_4333 AS C_22#5227, 'C_7 AS C_20#5228, 'C_2 AS C_15#5229, 'C_5 AS C_25#5230, 'C_8 AS C_12#5231, 'C_9 AS C_24#5232, 'C_3 AS C_14#5233, 'C_11 AS C_19#5234, 'C_1 AS C_17#5235]
+- 'SubqueryAlias C_4954424c
+- 'Project ['C_64656661756c745f616972706f727473.id AS C_10#5206, 'C_64656661756c745f616972706f727473.type AS C_43#5207, 'C_64656661756c745f616972706f727473.name AS C_4#5208, 'C_64656661756c745f616972706f727473.lat AS C_0#5209, 'C_64656661756c745f616972706f727473.lon AS C_6#5210, 'C_64656661756c745f616972706f727473.elev AS C_1#5211, 'C_64656661756c745f616972706f727473.continent AS C_7#5212, 'C_64656661756c745f616972706f727473.country AS C_2#5213, 'C_64656661756c745f616972706f727473.region AS C_5#5214, 'C_64656661756c745f616972706f727473.city AS C_8#5215, 'C_64656661756c745f616972706f727473.iata AS C_9#5216, 'C_64656661756c745f616972706f727473.code AS C_3#5217, 'C_64656661756c745f616972706f727473.gps AS C_11#5218, ('round(('C_64656661756c745f616972706f727473.lat * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4331#5219, ('round(('C_64656661756c745f616972706f727473.lon * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4332#5220, ('round(('C_64656661756c745f616972706f727473.elev * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4333#5221]
+- 'Filter ((('C_64656661756c745f616972706f727473.lon <= -104.05) AND ('C_64656661756c745f616972706f727473.lon >= -111.05)) AND (('C_64656661756c745f616972706f727473.lat >= 41.0) AND ('C_64656661756c745f616972706f727473.lat <= 45.0)))
+- 'SubqueryAlias C_64656661756c745f616972706f727473
+- 'UnresolvedRelation [default, airports], [], false
== Analyzed Logical Plan ==
C_13: string, C_18: string, C_23: string, C_16: double, C_21: double, C_22: double, C_20: string, C_15: string, C_25: string, C_12: string, C_24: string, C_14: string, C_19: string, C_17: double
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_17#5235 DESC NULLS LAST], true
+- Project [C_10#5206 AS C_13#5222, C_43#5207 AS C_18#5223, C_4#5208 AS C_23#5224, C_4331#5219 AS C_16#5225, C_4332#5220 AS C_21#5226, C_4333#5221 AS C_22#5227, C_7#5212 AS C_20#5228, C_2#5213 AS C_15#5229, C_5#5214 AS C_25#5230, C_8#5215 AS C_12#5231, C_9#5216 AS C_24#5232, C_3#5217 AS C_14#5233, C_11#5218 AS C_19#5234, C_1#5211 AS C_17#5235]
+- SubqueryAlias C_4954424c
+- Project [id#5236 AS C_10#5206, type#5237 AS C_43#5207, name#5238 AS C_4#5208, lat#5239 AS C_0#5209, lon#5240 AS C_6#5210, elev#5241 AS C_1#5211, continent#5242 AS C_7#5212, country#5243 AS C_2#5213, region#5244 AS C_5#5214, city#5245 AS C_8#5215, iata#5246 AS C_9#5216, code#5247 AS C_3#5217, gps#5248 AS C_11#5218, (round((lat#5239 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4331#5219, (round((lon#5240 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4332#5220, (round((elev#5241 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4333#5221]
+- Filter (((lon#5240 <= -104.05) AND (lon#5240 >= -111.05)) AND ((lat#5239 >= 41.0) AND (lat#5239 <= 45.0)))
+- SubqueryAlias C_64656661756c745f616972706f727473
+- SubqueryAlias spark_catalog.default.airports
+- Relation spark_catalog.default.airports[id#5236,type#5237,name#5238,lat#5239,lon#5240,elev#5241,continent#5242,country#5243,region#5244,city#5245,iata#5246,code#5247,gps#5248] parquet
== Optimized Logical Plan ==
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_17#5235 DESC NULLS LAST], true
+- Project [id#5236 AS C_13#5222, type#5237 AS C_18#5223, name#5238 AS C_23#5224, (round((lat#5239 * 1000.0), 0) / 1000.0) AS C_16#5225, (round((lon#5240 * 1000.0), 0) / 1000.0) AS C_21#5226, (round((elev#5241 * 1000.0), 0) / 1000.0) AS C_22#5227, continent#5242 AS C_20#5228, country#5243 AS C_15#5229, region#5244 AS C_25#5230, city#5245 AS C_12#5231, iata#5246 AS C_24#5232, code#5247 AS C_14#5233, gps#5248 AS C_19#5234, elev#5241 AS C_17#5235]
+- Filter ((isnotnull(lon#5240) AND isnotnull(lat#5239)) AND (((lon#5240 <= -104.05) AND (lon#5240 >= -111.05)) AND ((lat#5239 >= 41.0) AND (lat#5239 <= 45.0))))
+- Relation spark_catalog.default.airports[id#5236,type#5237,name#5238,lat#5239,lon#5240,elev#5241,continent#5242,country#5243,region#5244,city#5245,iata#5246,code#5247,gps#5248] parquet
== Physical Plan ==
TakeOrderedAndProject(limit=5, orderBy=[C_17#5235 DESC NULLS LAST], output=[C_13#5222,C_18#5223,C_23#5224,C_16#5225,C_21#5226,C_22#5227,C_20#5228,C_15#5229,C_25#5230,C_12#5231,C_24#5232,C_14#5233,C_19#5234,C_17#5235])
+- *(1) Project [id#5236 AS C_13#5222, type#5237 AS C_18#5223, name#5238 AS C_23#5224, (round((lat#5239 * 1000.0), 0) / 1000.0) AS C_16#5225, (round((lon#5240 * 1000.0), 0) / 1000.0) AS C_21#5226, (round((elev#5241 * 1000.0), 0) / 1000.0) AS C_22#5227, continent#5242 AS C_20#5228, country#5243 AS C_15#5229, region#5244 AS C_25#5230, city#5245 AS C_12#5231, iata#5246 AS C_24#5232, code#5247 AS C_14#5233, gps#5248 AS C_19#5234, elev#5241 AS C_17#5235]
+- *(1) Filter (((((isnotnull(lon#5240) AND isnotnull(lat#5239)) AND (lon#5240 <= -104.05)) AND (lon#5240 >= -111.05)) AND (lat#5239 >= 41.0)) AND (lat#5239 <= 45.0))
+- *(1) ColumnarToRow
+- FileScan parquet spark_catalog.default.airports[id#5236,type#5237,name#5238,lat#5239,lon#5240,elev#5241,continent#5242,country#5243,region#5244,city#5245,iata#5246,code#5247,gps#5248] Batched: true, DataFilters: [isnotnull(lon#5240), isnotnull(lat#5239), (lon#5240 <= -104.05), (lon#5240 >= -111.05), (lat#523..., Format: Parquet, Location: InMemoryFileIndex(1 paths)[file:/home/acdcadmin/spark-warehouse/airports], PartitionFilters: [], PushedFilters: [IsNotNull(lon), IsNotNull(lat), LessThanOrEqual(lon,-104.05), GreaterThanOrEqual(lon,-111.05), G..., ReadSchema: struct<id:string,type:string,name:string,lat:double,lon:double,elev:double,continent:string,count...

------------------------------------------------------------------------
User:            jonathon
Job IDs:
Group ID:        088906e4-b3ff-45c5-8c3a-b42f2d244749
Start Time:      2025/06/13 23:29:58
Finish Time:     2025/06/13 23:29:59
Close Time:      2025/06/13 23:29:59
Execution Time:  82 ms
Duration:        190 ms
Statement:       DESCRIBE default.airports
State:           CLOSED
Detail:
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#3833, data_type#3834, comment#3835]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#3833, data_type#3834, comment#3835]
== Optimized Logical Plan ==
CommandResult [col_name#3833, data_type#3834, comment#3835], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#3833, data_type#3834, comment#3835]
== Physical Plan ==
CommandResult [col_name#3833, data_type#3834, comment#3835]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#3833, data_type#3834, comment#3835]
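
The DescribeTableCommand result above enumerates the full schema of default.airports. As DDL, that schema corresponds to the following sketch (the storage format is taken from the Parquet FileScan nodes in the SELECT plans; any other table options are unknown):

  CREATE TABLE default.airports (
    id        STRING,
    type      STRING,
    name      STRING,
    lat       DOUBLE,
    lon       DOUBLE,
    elev      DOUBLE,
    continent STRING,
    country   STRING,
    region    STRING,
    city      STRING,
    iata      STRING,
    code      STRING,
    gps       STRING
  ) USING PARQUET;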

------------------------------------------------------------------------
User:            jonathon
Job IDs:
Group ID:        089371d8-0d89-489e-bad8-9288df1edeee
Start Time:      2025/06/13 22:18:17
Finish Time:     2025/06/13 22:18:17
Close Time:      2025/06/13 22:18:17
Execution Time:  42 ms
Duration:        184 ms
Statement:       set -v
State:           CLOSED
Detail:
== Parsed Logical Plan ==
SetCommand (-v,None)
== Analyzed Logical Plan ==
key: string, value: string, meaning: string, Since version: string
SetCommand (-v,None)
== Optimized Logical Plan ==
CommandResult [key#2429, value#2430, meaning#2431, Since version#2432], Execute SetCommand, [[spark.sql.adaptive.advisoryPartitionSizeInBytes,<value of spark.sql.adaptive.shuffle.targetPostShuffleInputSize>,The advisory size in bytes of the shuffle partition during adaptive optimization (when spark.sql.adaptive.enabled is true). It takes effect when Spark coalesces small shuffle partitions or splits skewed shuffle partition.,3.0.0], [spark.sql.adaptive.autoBroadcastJoinThreshold,<undefined>,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. The default value is same with spark.sql.autoBroadcastJoinThreshold. Note that, this config is used only in adaptive framework.,3.2.0], [spark.sql.adaptive.coalescePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will coalesce contiguous shuffle partitions according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid too many small tasks.,3.0.0], [spark.sql.adaptive.coalescePartitions.initialPartitionNum,<undefined>,The initial number of shuffle partitions before coalescing. If not set, it equals to spark.sql.shuffle.partitions. This configuration only has an effect when 'spark.sql.adaptive.enabled' and 'spark.sql.adaptive.coalescePartitions.enabled' are both true.,3.0.0], [spark.sql.adaptive.coalescePartitions.minPartitionSize,1MB,The minimum size of shuffle partitions after coalescing. This is useful when the adaptively calculated target size is too small during partition coalescing.,3.2.0], [spark.sql.adaptive.coalescePartitions.parallelismFirst,true,When true, Spark does not respect the target size specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes' (default 64MB) when coalescing contiguous shuffle partitions, but adaptively calculate the target size according to the default parallelism of the Spark cluster. The calculated size is usually smaller than the configured target size. This is to maximize the parallelism and avoid performance regression when enabling adaptive query execution. It's recommended to set this config to false and respect the configured target size.,3.2.0], [spark.sql.adaptive.customCostEvaluatorClass,<undefined>,The custom cost evaluator class to be used for adaptive execution. If not being set, Spark will use its own SimpleCostEvaluator by default.,3.2.0], [spark.sql.adaptive.enabled,true,When true, enable adaptive query execution, which re-optimizes the query plan in the middle of query execution, based on accurate runtime statistics.,1.6.0], [spark.sql.adaptive.forceOptimizeSkewedJoin,false,When true, force enable OptimizeSkewedJoin even if it introduces extra shuffle.,3.3.0], [spark.sql.adaptive.localShuffleReader.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark tries to use local shuffle reader to read the shuffle data when the shuffle partitioning is not needed, for example, after converting sort-merge join to broadcast-hash join.,3.0.0], [spark.sql.adaptive.maxShuffledHashJoinLocalMapThreshold,0b,Configures the maximum size in bytes per partition that can be allowed to build local hash map. 
If this value is not smaller than spark.sql.adaptive.advisoryPartitionSizeInBytes and all the partition size are not larger than this config, join selection prefer to use shuffled hash join instead of sort merge join regardless of the value of spark.sql.join.preferSortMergeJoin.,3.2.0], [spark.sql.adaptive.optimizeSkewsInRebalancePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will optimize the skewed shuffle partitions in RebalancePartitions and split them to smaller ones according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid data skew.,3.2.0], [spark.sql.adaptive.optimizer.excludedRules,<undefined>,Configures a list of rules to be disabled in the adaptive optimizer, in which the rules are specified by their rule names and separated by comma. The optimizer will log the rules that have indeed been excluded.,3.1.0], [spark.sql.adaptive.rebalancePartitionsSmallPartitionFactor,0.2,A partition will be merged during splitting if its size is small than this factor multiply spark.sql.adaptive.advisoryPartitionSizeInBytes.,3.3.0], [spark.sql.adaptive.skewJoin.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark dynamically handles skew in shuffled join (sort-merge and shuffled hash) by splitting (and replicating if needed) skewed partitions.,3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionFactor,5.0,A partition is considered as skewed if its size is larger than this factor multiplying the median partition size and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes',3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes,256MB,A partition is considered as skewed if its size in bytes is larger than this threshold and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionFactor' multiplying the median partition size. Ideally this config should be set larger than 'spark.sql.adaptive.advisoryPartitionSizeInBytes'.,3.0.0], [spark.sql.allowNamedFunctionArguments,true,If true, Spark will turn on support for named parameters for all functions that has it implemented.,3.5.0], [spark.sql.ansi.doubleQuotedIdentifiers,false,When true and 'spark.sql.ansi.enabled' is true, Spark SQL reads literals enclosed in double quoted (") as identifiers. When false they are read as string literals.,3.4.0], [spark.sql.ansi.enabled,false,When true, Spark SQL uses an ANSI compliant dialect instead of being Hive compliant. For example, Spark will throw an exception at runtime instead of returning null results when the inputs to a SQL operator/function are invalid.For full details of this dialect, you can find them in the section "ANSI Compliance" of Spark's documentation. Some ANSI dialect features may be not from the ANSI SQL standard directly, but their behaviors align with ANSI SQL's style,3.0.0], [spark.sql.ansi.enforceReservedKeywords,false,When true and 'spark.sql.ansi.enabled' is true, the Spark SQL parser enforces the ANSI reserved keywords and forbids SQL queries that use reserved keywords as alias names and/or identifiers for table, view, function, etc.,3.3.0], [spark.sql.ansi.relationPrecedence,false,When true and 'spark.sql.ansi.enabled' is true, JOIN takes precedence over comma when combining relation. For example, `t1, t2 JOIN t3` should result to `t1 X (t2 X t3)`. 
If the config is false, the result is `(t1 X t2) X t3`.,3.4.0], [spark.sql.autoBroadcastJoinThreshold,10MB,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. Note that currently statistics are only supported for Hive Metastore tables where the command `ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan` has been run, and file-based data source tables where the statistics are computed directly on the files of data.,1.1.0], [spark.sql.avro.compression.codec,snappy,Compression codec used in writing of AVRO files. Supported codecs: uncompressed, deflate, snappy, bzip2, xz and zstandard. Default codec is snappy.,2.4.0], ... 183 more fields]
+- SetCommand (-v,None)
== Physical Plan ==
CommandResult [key#2429, value#2430, meaning#2431, Since version#2432]
+- Execute SetCommand
+- SetCommand (-v,None)
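
set -v dumps every Spark SQL configuration entry with its value, description, and the version it appeared in (the CommandResult above is truncated at "... 183 more fields"). To inspect or override a single key instead:

  -- read one configuration key
  SET spark.sql.adaptive.enabled;

  -- override it for the current session
  SET spark.sql.adaptive.enabled=false;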

------------------------------------------------------------------------
User:            jonathon
Job IDs:
Group ID:        0a203f2e-8868-46b7-a492-ad74cd9d3138
Start Time:      2025/06/14 06:13:08
Finish Time:     2025/06/14 06:13:08
Close Time:      2025/06/14 06:13:08
Execution Time:  216 ms
Duration:        309 ms
Statement:       Listing tables 'catalog : null, schemaPattern : %, tableTypes : null, tableName : %'
State:           CLOSED

------------------------------------------------------------------------
User:            jonathon
Job IDs:
Group ID:        0b3af494-d430-4b8d-8d5c-c65a2a33f5fa
Start Time:      2025/06/13 07:55:30
Finish Time:     2025/06/13 07:55:30
Close Time:      2025/06/13 07:55:30
Execution Time:  40 ms
Duration:        272 ms
Statement:       set -v
State:           CLOSED
Detail:
== Parsed Logical Plan ==
SetCommand (-v,None)
== Analyzed Logical Plan ==
key: string, value: string, meaning: string, Since version: string
SetCommand (-v,None)
== Optimized Logical Plan ==
CommandResult [key#2177, value#2178, meaning#2179, Since version#2180], Execute SetCommand, [[spark.sql.adaptive.advisoryPartitionSizeInBytes,<value of spark.sql.adaptive.shuffle.targetPostShuffleInputSize>,The advisory size in bytes of the shuffle partition during adaptive optimization (when spark.sql.adaptive.enabled is true). It takes effect when Spark coalesces small shuffle partitions or splits skewed shuffle partition.,3.0.0], [spark.sql.adaptive.autoBroadcastJoinThreshold,<undefined>,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. The default value is same with spark.sql.autoBroadcastJoinThreshold. Note that, this config is used only in adaptive framework.,3.2.0], [spark.sql.adaptive.coalescePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will coalesce contiguous shuffle partitions according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid too many small tasks.,3.0.0], [spark.sql.adaptive.coalescePartitions.initialPartitionNum,<undefined>,The initial number of shuffle partitions before coalescing. If not set, it equals to spark.sql.shuffle.partitions. This configuration only has an effect when 'spark.sql.adaptive.enabled' and 'spark.sql.adaptive.coalescePartitions.enabled' are both true.,3.0.0], [spark.sql.adaptive.coalescePartitions.minPartitionSize,1MB,The minimum size of shuffle partitions after coalescing. This is useful when the adaptively calculated target size is too small during partition coalescing.,3.2.0], [spark.sql.adaptive.coalescePartitions.parallelismFirst,true,When true, Spark does not respect the target size specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes' (default 64MB) when coalescing contiguous shuffle partitions, but adaptively calculate the target size according to the default parallelism of the Spark cluster. The calculated size is usually smaller than the configured target size. This is to maximize the parallelism and avoid performance regression when enabling adaptive query execution. It's recommended to set this config to false and respect the configured target size.,3.2.0], [spark.sql.adaptive.customCostEvaluatorClass,<undefined>,The custom cost evaluator class to be used for adaptive execution. If not being set, Spark will use its own SimpleCostEvaluator by default.,3.2.0], [spark.sql.adaptive.enabled,true,When true, enable adaptive query execution, which re-optimizes the query plan in the middle of query execution, based on accurate runtime statistics.,1.6.0], [spark.sql.adaptive.forceOptimizeSkewedJoin,false,When true, force enable OptimizeSkewedJoin even if it introduces extra shuffle.,3.3.0], [spark.sql.adaptive.localShuffleReader.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark tries to use local shuffle reader to read the shuffle data when the shuffle partitioning is not needed, for example, after converting sort-merge join to broadcast-hash join.,3.0.0], [spark.sql.adaptive.maxShuffledHashJoinLocalMapThreshold,0b,Configures the maximum size in bytes per partition that can be allowed to build local hash map. 
If this value is not smaller than spark.sql.adaptive.advisoryPartitionSizeInBytes and all the partition size are not larger than this config, join selection prefer to use shuffled hash join instead of sort merge join regardless of the value of spark.sql.join.preferSortMergeJoin.,3.2.0], [spark.sql.adaptive.optimizeSkewsInRebalancePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will optimize the skewed shuffle partitions in RebalancePartitions and split them to smaller ones according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid data skew.,3.2.0], [spark.sql.adaptive.optimizer.excludedRules,<undefined>,Configures a list of rules to be disabled in the adaptive optimizer, in which the rules are specified by their rule names and separated by comma. The optimizer will log the rules that have indeed been excluded.,3.1.0], [spark.sql.adaptive.rebalancePartitionsSmallPartitionFactor,0.2,A partition will be merged during splitting if its size is small than this factor multiply spark.sql.adaptive.advisoryPartitionSizeInBytes.,3.3.0], [spark.sql.adaptive.skewJoin.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark dynamically handles skew in shuffled join (sort-merge and shuffled hash) by splitting (and replicating if needed) skewed partitions.,3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionFactor,5.0,A partition is considered as skewed if its size is larger than this factor multiplying the median partition size and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes',3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes,256MB,A partition is considered as skewed if its size in bytes is larger than this threshold and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionFactor' multiplying the median partition size. Ideally this config should be set larger than 'spark.sql.adaptive.advisoryPartitionSizeInBytes'.,3.0.0], [spark.sql.allowNamedFunctionArguments,true,If true, Spark will turn on support for named parameters for all functions that has it implemented.,3.5.0], [spark.sql.ansi.doubleQuotedIdentifiers,false,When true and 'spark.sql.ansi.enabled' is true, Spark SQL reads literals enclosed in double quoted (") as identifiers. When false they are read as string literals.,3.4.0], [spark.sql.ansi.enabled,false,When true, Spark SQL uses an ANSI compliant dialect instead of being Hive compliant. For example, Spark will throw an exception at runtime instead of returning null results when the inputs to a SQL operator/function are invalid.For full details of this dialect, you can find them in the section "ANSI Compliance" of Spark's documentation. Some ANSI dialect features may be not from the ANSI SQL standard directly, but their behaviors align with ANSI SQL's style,3.0.0], [spark.sql.ansi.enforceReservedKeywords,false,When true and 'spark.sql.ansi.enabled' is true, the Spark SQL parser enforces the ANSI reserved keywords and forbids SQL queries that use reserved keywords as alias names and/or identifiers for table, view, function, etc.,3.3.0], [spark.sql.ansi.relationPrecedence,false,When true and 'spark.sql.ansi.enabled' is true, JOIN takes precedence over comma when combining relation. For example, `t1, t2 JOIN t3` should result to `t1 X (t2 X t3)`. 
If the config is false, the result is `(t1 X t2) X t3`.,3.4.0], [spark.sql.autoBroadcastJoinThreshold,10MB,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. Note that currently statistics are only supported for Hive Metastore tables where the command `ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan` has been run, and file-based data source tables where the statistics are computed directly on the files of data.,1.1.0], [spark.sql.avro.compression.codec,snappy,Compression codec used in writing of AVRO files. Supported codecs: uncompressed, deflate, snappy, bzip2, xz and zstandard. Default codec is snappy.,2.4.0], ... 183 more fields]
+- SetCommand (-v,None)
== Physical Plan ==
CommandResult [key#2177, value#2178, meaning#2179, Since version#2180]
+- Execute SetCommand
+- SetCommand (-v,None)

------------------------------------------------------------------------
User:            jonathan
Job IDs:
Group ID:        0bf6ef0b-29be-47bc-ac42-32a3e198fd40
Start Time:      2025/06/13 22:51:51
Finish Time:     2025/06/13 22:51:51
Close Time:      2025/06/13 22:51:51
Execution Time:  11 ms
Duration:        777 ms
Statement:       Listing catalogs
State:           CLOSED

------------------------------------------------------------------------
User:            jonathon
Job IDs:
Group ID:        0d8fea16-473d-4a84-9320-5533504ea8f2
Start Time:      2025/06/14 06:31:58
Finish Time:     2025/06/14 06:31:58
Close Time:      2025/06/14 06:31:58
Execution Time:  80 ms
Duration:        177 ms
Statement:       DESCRIBE default.airports
State:           CLOSED
Detail:
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#5183, data_type#5184, comment#5185]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5183, data_type#5184, comment#5185]
== Optimized Logical Plan ==
CommandResult [col_name#5183, data_type#5184, comment#5185], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5183, data_type#5184, comment#5185]
== Physical Plan ==
CommandResult [col_name#5183, data_type#5184, comment#5185]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5183, data_type#5184, comment#5185]

------------------------------------------------------------------------
User:            jonathon
Job IDs:
Group ID:        0db6a993-9167-493d-8567-f01d986d02b8
Start Time:      2025/06/13 22:18:18
Finish Time:     2025/06/13 22:18:18
Close Time:      2025/06/13 22:18:18
Execution Time:  87 ms
Duration:        186 ms
Statement:       DESCRIBE default.airports
State:           CLOSED
Detail:
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#2454, data_type#2455, comment#2456]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2454, data_type#2455, comment#2456]
== Optimized Logical Plan ==
CommandResult [col_name#2454, data_type#2455, comment#2456], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2454, data_type#2455, comment#2456]
== Physical Plan ==
CommandResult [col_name#2454, data_type#2455, comment#2456]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2454, data_type#2455, comment#2456]

------------------------------------------------------------------------
User:            jonathon
Job IDs:
Group ID:        0f303db8-cb06-43c4-8d23-de2570a9098d
Start Time:      2025/06/13 23:29:59
Finish Time:     2025/06/13 23:29:59
Close Time:      2025/06/13 23:29:59
Execution Time:  25 ms
Duration:        137 ms
Statement:       Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
State:           CLOSED

------------------------------------------------------------------------
User:            jonathon
Job IDs:         [42]
Group ID:        0fbc9c82-791c-4e57-af50-0ea0e2e1e9d6
Start Time:      2025/06/13 23:38:37
Finish Time:     2025/06/13 23:38:37
Close Time:      2025/06/13 23:38:37
Execution Time:  180 ms
Duration:        278 ms
Statement:
SELECT C_5 AS C_21, C_6 AS C_18, C_7 AS C_22, C_4331 AS C_14, C_4332 AS C_23, C_4333 AS C_17, C_0 AS C_20, C_1 AS C_13, C_43 AS C_12, C_4 AS C_19, C_2 AS C_24, C_10 AS C_15, C_11 AS C_16, C_9 AS C_25 FROM (SELECT C_64656661756c745f616972706f727473.`id` AS C_5, C_64656661756c745f616972706f727473.`type` AS C_6, C_64656661756c745f616972706f727473.`name` AS C_7, C_64656661756c745f616972706f727473.`lat` AS C_8, C_64656661756c745f616972706f727473.`lon` AS C_3, C_64656661756c745f616972706f727473.`elev` AS C_9, C_64656661756c745f616972706f727473.`continent` AS C_0, C_64656661756c745f616972706f727473.`country` AS C_1, C_64656661756c745f616972706f727473.`region` AS C_43, C_64656661756c745f616972706f727473.`city` AS C_4, C_64656661756c745f616972706f727473.`iata` AS C_2, C_64656661756c745f616972706f727473.`code` AS C_10, C_64656661756c745f616972706f727473.`gps` AS C_11, (round((C_64656661756c745f616972706f727473.`lat` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4331, (round((C_64656661756c745f616972706f727473.`lon` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4332, (round((C_64656661756c745f616972706f727473.`elev` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4333 FROM `default`.`airports` C_64656661756c745f616972706f727473 WHERE ((C_64656661756c745f616972706f727473.`lon` <= (- 1.040500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lon` >= (- 1.110500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lat` >= 4.100000000000000E+001) AND (C_64656661756c745f616972706f727473.`lat` <= 4.500000000000000E+001)) ) C_4954424c ORDER BY C_25 DESC LIMIT 5
State:           CLOSED
Detail:
== Parsed Logical Plan ==
'GlobalLimit 5
+- 'LocalLimit 5
+- 'Sort ['C_25 DESC NULLS LAST], true
+- 'Project ['C_5 AS C_21#4304, 'C_6 AS C_18#4305, 'C_7 AS C_22#4306, 'C_4331 AS C_14#4307, 'C_4332 AS C_23#4308, 'C_4333 AS C_17#4309, 'C_0 AS C_20#4310, 'C_1 AS C_13#4311, 'C_43 AS C_12#4312, 'C_4 AS C_19#4313, 'C_2 AS C_24#4314, 'C_10 AS C_15#4315, 'C_11 AS C_16#4316, 'C_9 AS C_25#4317]
+- 'SubqueryAlias C_4954424c
+- 'Project ['C_64656661756c745f616972706f727473.id AS C_5#4288, 'C_64656661756c745f616972706f727473.type AS C_6#4289, 'C_64656661756c745f616972706f727473.name AS C_7#4290, 'C_64656661756c745f616972706f727473.lat AS C_8#4291, 'C_64656661756c745f616972706f727473.lon AS C_3#4292, 'C_64656661756c745f616972706f727473.elev AS C_9#4293, 'C_64656661756c745f616972706f727473.continent AS C_0#4294, 'C_64656661756c745f616972706f727473.country AS C_1#4295, 'C_64656661756c745f616972706f727473.region AS C_43#4296, 'C_64656661756c745f616972706f727473.city AS C_4#4297, 'C_64656661756c745f616972706f727473.iata AS C_2#4298, 'C_64656661756c745f616972706f727473.code AS C_10#4299, 'C_64656661756c745f616972706f727473.gps AS C_11#4300, ('round(('C_64656661756c745f616972706f727473.lat * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4331#4301, ('round(('C_64656661756c745f616972706f727473.lon * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4332#4302, ('round(('C_64656661756c745f616972706f727473.elev * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4333#4303]
+- 'Filter ((('C_64656661756c745f616972706f727473.lon <= -104.05) AND ('C_64656661756c745f616972706f727473.lon >= -111.05)) AND (('C_64656661756c745f616972706f727473.lat >= 41.0) AND ('C_64656661756c745f616972706f727473.lat <= 45.0)))
+- 'SubqueryAlias C_64656661756c745f616972706f727473
+- 'UnresolvedRelation [default, airports], [], false
== Analyzed Logical Plan ==
C_21: string, C_18: string, C_22: string, C_14: double, C_23: double, C_17: double, C_20: string, C_13: string, C_12: string, C_19: string, C_24: string, C_15: string, C_16: string, C_25: double
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_25#4317 DESC NULLS LAST], true
+- Project [C_5#4288 AS C_21#4304, C_6#4289 AS C_18#4305, C_7#4290 AS C_22#4306, C_4331#4301 AS C_14#4307, C_4332#4302 AS C_23#4308, C_4333#4303 AS C_17#4309, C_0#4294 AS C_20#4310, C_1#4295 AS C_13#4311, C_43#4296 AS C_12#4312, C_4#4297 AS C_19#4313, C_2#4298 AS C_24#4314, C_10#4299 AS C_15#4315, C_11#4300 AS C_16#4316, C_9#4293 AS C_25#4317]
+- SubqueryAlias C_4954424c
+- Project [id#4318 AS C_5#4288, type#4319 AS C_6#4289, name#4320 AS C_7#4290, lat#4321 AS C_8#4291, lon#4322 AS C_3#4292, elev#4323 AS C_9#4293, continent#4324 AS C_0#4294, country#4325 AS C_1#4295, region#4326 AS C_43#4296, city#4327 AS C_4#4297, iata#4328 AS C_2#4298, code#4329 AS C_10#4299, gps#4330 AS C_11#4300, (round((lat#4321 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4331#4301, (round((lon#4322 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4332#4302, (round((elev#4323 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4333#4303]
+- Filter (((lon#4322 <= -104.05) AND (lon#4322 >= -111.05)) AND ((lat#4321 >= 41.0) AND (lat#4321 <= 45.0)))
+- SubqueryAlias C_64656661756c745f616972706f727473
+- SubqueryAlias spark_catalog.default.airports
+- Relation spark_catalog.default.airports[id#4318,type#4319,name#4320,lat#4321,lon#4322,elev#4323,continent#4324,country#4325,region#4326,city#4327,iata#4328,code#4329,gps#4330] parquet
== Optimized Logical Plan ==
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_25#4317 DESC NULLS LAST], true
+- Project [id#4318 AS C_21#4304, type#4319 AS C_18#4305, name#4320 AS C_22#4306, (round((lat#4321 * 1000.0), 0) / 1000.0) AS C_14#4307, (round((lon#4322 * 1000.0), 0) / 1000.0) AS C_23#4308, (round((elev#4323 * 1000.0), 0) / 1000.0) AS C_17#4309, continent#4324 AS C_20#4310, country#4325 AS C_13#4311, region#4326 AS C_12#4312, city#4327 AS C_19#4313, iata#4328 AS C_24#4314, code#4329 AS C_15#4315, gps#4330 AS C_16#4316, elev#4323 AS C_25#4317]
+- Filter ((isnotnull(lon#4322) AND isnotnull(lat#4321)) AND (((lon#4322 <= -104.05) AND (lon#4322 >= -111.05)) AND ((lat#4321 >= 41.0) AND (lat#4321 <= 45.0))))
+- Relation spark_catalog.default.airports[id#4318,type#4319,name#4320,lat#4321,lon#4322,elev#4323,continent#4324,country#4325,region#4326,city#4327,iata#4328,code#4329,gps#4330] parquet
== Physical Plan ==
TakeOrderedAndProject(limit=5, orderBy=[C_25#4317 DESC NULLS LAST], output=[C_21#4304,C_18#4305,C_22#4306,C_14#4307,C_23#4308,C_17#4309,C_20#4310,C_13#4311,C_12#4312,C_19#4313,C_24#4314,C_15#4315,C_16#4316,C_25#4317])
+- *(1) Project [id#4318 AS C_21#4304, type#4319 AS C_18#4305, name#4320 AS C_22#4306, (round((lat#4321 * 1000.0), 0) / 1000.0) AS C_14#4307, (round((lon#4322 * 1000.0), 0) / 1000.0) AS C_23#4308, (round((elev#4323 * 1000.0), 0) / 1000.0) AS C_17#4309, continent#4324 AS C_20#4310, country#4325 AS C_13#4311, region#4326 AS C_12#4312, city#4327 AS C_19#4313, iata#4328 AS C_24#4314, code#4329 AS C_15#4315, gps#4330 AS C_16#4316, elev#4323 AS C_25#4317]
+- *(1) Filter (((((isnotnull(lon#4322) AND isnotnull(lat#4321)) AND (lon#4322 <= -104.05)) AND (lon#4322 >= -111.05)) AND (lat#4321 >= 41.0)) AND (lat#4321 <= 45.0))
+- *(1) ColumnarToRow
+- FileScan parquet spark_catalog.default.airports[id#4318,type#4319,name#4320,lat#4321,lon#4322,elev#4323,continent#4324,country#4325,region#4326,city#4327,iata#4328,code#4329,gps#4330] Batched: true, DataFilters: [isnotnull(lon#4322), isnotnull(lat#4321), (lon#4322 <= -104.05), (lon#4322 >= -111.05), (lat#432..., Format: Parquet, Location: InMemoryFileIndex(1 paths)[file:/home/acdcadmin/spark-warehouse/airports], PartitionFilters: [], PushedFilters: [IsNotNull(lon), IsNotNull(lat), LessThanOrEqual(lon,-104.05), GreaterThanOrEqual(lon,-111.05), G..., ReadSchema: struct<id:string,type:string,name:string,lat:double,lon:double,elev:double,continent:string,count...
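
Two details of the physical plan are worth noting: the lat/lon predicates appear in PushedFilters, i.e. they are evaluated inside the Parquet scan, and ORDER BY ... LIMIT 5 is compiled to TakeOrderedAndProject, a top-K selection that avoids a full sort. The same behaviour can be checked for a hand-written query (a sketch):

  -- look for PushedFilters in the scan node and TakeOrderedAndProject at the top
  EXPLAIN FORMATTED
  SELECT name, elev
  FROM default.airports
  WHERE lon BETWEEN -111.05 AND -104.05
    AND lat BETWEEN 41.0 AND 45.0
  ORDER BY elev DESC
  LIMIT 5;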

------------------------------------------------------------------------
User:            jonathon
Job IDs:
Group ID:        104fad28-c667-4108-95e3-8589bd1484de
Start Time:      2025/06/14 01:46:18
Finish Time:     2025/06/14 01:46:18
Close Time:      2025/06/14 01:46:18
Execution Time:  205 ms
Duration:        297 ms
Statement:       Listing tables 'catalog : null, schemaPattern : %, tableTypes : null, tableName : %'
State:           CLOSED

------------------------------------------------------------------------
User:            jonathon
Job IDs:
Group ID:        107e708d-28e8-47cf-bdff-8031a1fe0208
Start Time:      2025/06/15 06:43:47
Finish Time:     2025/06/15 06:43:47
Close Time:      2025/06/15 06:43:47
Execution Time:  42 ms
Duration:        180 ms
Statement:       set -v
State:           CLOSED
Detail:
== Parsed Logical Plan ==
SetCommand (-v,None)
== Analyzed Logical Plan ==
key: string, value: string, meaning: string, Since version: string
SetCommand (-v,None)
== Optimized Logical Plan ==
CommandResult [key#5279, value#5280, meaning#5281, Since version#5282], Execute SetCommand, [[spark.sql.adaptive.advisoryPartitionSizeInBytes,<value of spark.sql.adaptive.shuffle.targetPostShuffleInputSize>,The advisory size in bytes of the shuffle partition during adaptive optimization (when spark.sql.adaptive.enabled is true). It takes effect when Spark coalesces small shuffle partitions or splits skewed shuffle partition.,3.0.0], [spark.sql.adaptive.autoBroadcastJoinThreshold,<undefined>,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. The default value is same with spark.sql.autoBroadcastJoinThreshold. Note that, this config is used only in adaptive framework.,3.2.0], [spark.sql.adaptive.coalescePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will coalesce contiguous shuffle partitions according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid too many small tasks.,3.0.0], [spark.sql.adaptive.coalescePartitions.initialPartitionNum,<undefined>,The initial number of shuffle partitions before coalescing. If not set, it equals to spark.sql.shuffle.partitions. This configuration only has an effect when 'spark.sql.adaptive.enabled' and 'spark.sql.adaptive.coalescePartitions.enabled' are both true.,3.0.0], [spark.sql.adaptive.coalescePartitions.minPartitionSize,1MB,The minimum size of shuffle partitions after coalescing. This is useful when the adaptively calculated target size is too small during partition coalescing.,3.2.0], [spark.sql.adaptive.coalescePartitions.parallelismFirst,true,When true, Spark does not respect the target size specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes' (default 64MB) when coalescing contiguous shuffle partitions, but adaptively calculate the target size according to the default parallelism of the Spark cluster. The calculated size is usually smaller than the configured target size. This is to maximize the parallelism and avoid performance regression when enabling adaptive query execution. It's recommended to set this config to false and respect the configured target size.,3.2.0], [spark.sql.adaptive.customCostEvaluatorClass,<undefined>,The custom cost evaluator class to be used for adaptive execution. If not being set, Spark will use its own SimpleCostEvaluator by default.,3.2.0], [spark.sql.adaptive.enabled,true,When true, enable adaptive query execution, which re-optimizes the query plan in the middle of query execution, based on accurate runtime statistics.,1.6.0], [spark.sql.adaptive.forceOptimizeSkewedJoin,false,When true, force enable OptimizeSkewedJoin even if it introduces extra shuffle.,3.3.0], [spark.sql.adaptive.localShuffleReader.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark tries to use local shuffle reader to read the shuffle data when the shuffle partitioning is not needed, for example, after converting sort-merge join to broadcast-hash join.,3.0.0], [spark.sql.adaptive.maxShuffledHashJoinLocalMapThreshold,0b,Configures the maximum size in bytes per partition that can be allowed to build local hash map. 
If this value is not smaller than spark.sql.adaptive.advisoryPartitionSizeInBytes and all the partition size are not larger than this config, join selection prefer to use shuffled hash join instead of sort merge join regardless of the value of spark.sql.join.preferSortMergeJoin.,3.2.0], [spark.sql.adaptive.optimizeSkewsInRebalancePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will optimize the skewed shuffle partitions in RebalancePartitions and split them to smaller ones according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid data skew.,3.2.0], [spark.sql.adaptive.optimizer.excludedRules,<undefined>,Configures a list of rules to be disabled in the adaptive optimizer, in which the rules are specified by their rule names and separated by comma. The optimizer will log the rules that have indeed been excluded.,3.1.0], [spark.sql.adaptive.rebalancePartitionsSmallPartitionFactor,0.2,A partition will be merged during splitting if its size is small than this factor multiply spark.sql.adaptive.advisoryPartitionSizeInBytes.,3.3.0], [spark.sql.adaptive.skewJoin.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark dynamically handles skew in shuffled join (sort-merge and shuffled hash) by splitting (and replicating if needed) skewed partitions.,3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionFactor,5.0,A partition is considered as skewed if its size is larger than this factor multiplying the median partition size and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes',3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes,256MB,A partition is considered as skewed if its size in bytes is larger than this threshold and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionFactor' multiplying the median partition size. Ideally this config should be set larger than 'spark.sql.adaptive.advisoryPartitionSizeInBytes'.,3.0.0], [spark.sql.allowNamedFunctionArguments,true,If true, Spark will turn on support for named parameters for all functions that has it implemented.,3.5.0], [spark.sql.ansi.doubleQuotedIdentifiers,false,When true and 'spark.sql.ansi.enabled' is true, Spark SQL reads literals enclosed in double quoted (") as identifiers. When false they are read as string literals.,3.4.0], [spark.sql.ansi.enabled,false,When true, Spark SQL uses an ANSI compliant dialect instead of being Hive compliant. For example, Spark will throw an exception at runtime instead of returning null results when the inputs to a SQL operator/function are invalid.For full details of this dialect, you can find them in the section "ANSI Compliance" of Spark's documentation. Some ANSI dialect features may be not from the ANSI SQL standard directly, but their behaviors align with ANSI SQL's style,3.0.0], [spark.sql.ansi.enforceReservedKeywords,false,When true and 'spark.sql.ansi.enabled' is true, the Spark SQL parser enforces the ANSI reserved keywords and forbids SQL queries that use reserved keywords as alias names and/or identifiers for table, view, function, etc.,3.3.0], [spark.sql.ansi.relationPrecedence,false,When true and 'spark.sql.ansi.enabled' is true, JOIN takes precedence over comma when combining relation. For example, `t1, t2 JOIN t3` should result to `t1 X (t2 X t3)`. 
If the config is false, the result is `(t1 X t2) X t3`.,3.4.0], [spark.sql.autoBroadcastJoinThreshold,10MB,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. Note that currently statistics are only supported for Hive Metastore tables where the command `ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan` has been run, and file-based data source tables where the statistics are computed directly on the files of data.,1.1.0], [spark.sql.avro.compression.codec,snappy,Compression codec used in writing of AVRO files. Supported codecs: uncompressed, deflate, snappy, bzip2, xz and zstandard. Default codec is snappy.,2.4.0], ... 183 more fields]
+- SetCommand (-v,None)
== Physical Plan ==
CommandResult [key#5279, value#5280, meaning#5281, Since version#5282]
+- Execute SetCommand
+- SetCommand (-v,None)

------------------------------------------------------------------------
User:            jonathon
Job IDs:
Group ID:        111bfcf6-5c3d-4e0d-9fa3-213ea0f08ab4
Start Time:      2025/06/13 23:29:58
Finish Time:     2025/06/13 23:29:58
Close Time:      2025/06/13 23:29:58
Execution Time:  204 ms
Duration:        312 ms
Statement:       Listing tables 'catalog : null, schemaPattern : %, tableTypes : null, tableName : %'
State:           CLOSED

------------------------------------------------------------------------
User:            jonathan
Job IDs:
Group ID:        12459dfe-ccf0-4f29-ae6a-39888705b926
Start Time:      2025/06/13 23:27:02
Finish Time:     2025/06/13 23:27:02
Close Time:      2025/06/13 23:27:02
Execution Time:  25 ms
Duration:        342 ms
Statement:       SHOW TABLES IN `default`
State:           CLOSED
Detail:
== Parsed Logical Plan ==
'ShowTables [namespace#3748, tableName#3749, isTemporary#3750]
+- 'UnresolvedNamespace [default]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#3748, tableName#3749, isTemporary#3750]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [default]
== Optimized Logical Plan ==
CommandResult [namespace#3748, tableName#3749, isTemporary#3750], ShowTables [namespace#3748, tableName#3749, isTemporary#3750], V2SessionCatalog(spark_catalog), [default], [[0,2000000007,2800000008,0,746c7561666564,7374726f70726961], [0,2000000007,2800000008,0,746c7561666564,73657079746c6c61], [0,2000000007,2800000009,0,746c7561666564,73657079746c6c61,32], [0,2000000007,280000000d,0,746c7561666564,73657079746c6c61,6369736162], [0,2000000007,280000000e,0,746c7561666564,73657079746c6c61,326369736162], [0,2000000007,2800000009,0,746c7561666564,7079747961727261,65], [0,2000000007,280000000a,0,746c7561666564,7974746e69676962,6570], [0,2000000007,280000000a,0,746c7561666564,79747972616e6962,6570], [0,2000000007,2800000008,0,746c7561666564,6570797465746164], [0,2000000007,280000000b,0,746c7561666564,746c616d69636564,657079], [0,2000000007,2800000009,0,746c7561666564,70797474616f6c66,65], [0,2000000007,2800000008,0,746c7561666564,736570797470616d], [0,2000000007,280000000b,0,746c7561666564,646978617463796e,617461], [0,2000000007,280000000f,0,746c7561666564,746978617463796e,61746164706972], [0,2000000007,2800000010,0,746c7561666564,7365745f656d6f73,32656c6261745f74], [0,2000000007,280000000a,0,746c7561666564,7974746375727473,6570], [0,2000000007,280000000e,0,746c7561666564,656e6f7a69786174,70756b6f6f6c], [0,2000000007,280000000c,0,746c7561666564,74676e696b726f77,73657079], [0,2000000007,2800000016,0,746c7561666564,74676e696b726f77,6874697773657079,7265626d756e]]
+- ShowTables [namespace#3748, tableName#3749, isTemporary#3750]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [default]
== Physical Plan ==
CommandResult [namespace#3748, tableName#3749, isTemporary#3750]
+- ShowTables [namespace#3748, tableName#3749, isTemporary#3750], V2SessionCatalog(spark_catalog), [default]
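
The CommandResult rows for ShowTables above appear to be raw serialized-row dumps: each fixed-width hex word seems to hold UTF-8 bytes in reversed (little-endian) order, so 746c7561666564 decodes to "default" and 7374726f70726961 to "airports". A decoding sketch in Spark SQL (regexp_extract_all requires Spark 3.1+):

  -- split the hex into byte pairs, reverse the byte order, decode as UTF-8
  SELECT decode(
           unhex(array_join(reverse(regexp_extract_all('7374726f70726961', '..', 0)), '')),
           'UTF-8');  -- airports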

------------------------------------------------------------------------
User:            jonathon
Job IDs:         [47]
Group ID:        12fe5c7a-924f-42f4-a7d5-c14493d2d747
Start Time:      2025/06/14 06:13:09
Finish Time:     2025/06/14 06:13:09
Close Time:      2025/06/14 06:13:09
Execution Time:  239 ms
Duration:        336 ms
Statement:
SELECT C_43 AS C_21, C_0 AS C_12, C_1 AS C_17, C_4331 AS C_16, C_4332 AS C_25, C_4333 AS C_23, C_4 AS C_19, C_5 AS C_20, C_7 AS C_14, C_8 AS C_18, C_9 AS C_22, C_11 AS C_15, C_10 AS C_13, C_3 AS C_24 FROM (SELECT C_64656661756c745f616972706f727473.`id` AS C_43, C_64656661756c745f616972706f727473.`type` AS C_0, C_64656661756c745f616972706f727473.`name` AS C_1, C_64656661756c745f616972706f727473.`lat` AS C_2, C_64656661756c745f616972706f727473.`lon` AS C_6, C_64656661756c745f616972706f727473.`elev` AS C_3, C_64656661756c745f616972706f727473.`continent` AS C_4, C_64656661756c745f616972706f727473.`country` AS C_5, C_64656661756c745f616972706f727473.`region` AS C_7, C_64656661756c745f616972706f727473.`city` AS C_8, C_64656661756c745f616972706f727473.`iata` AS C_9, C_64656661756c745f616972706f727473.`code` AS C_11, C_64656661756c745f616972706f727473.`gps` AS C_10, (round((C_64656661756c745f616972706f727473.`lat` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4331, (round((C_64656661756c745f616972706f727473.`lon` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4332, (round((C_64656661756c745f616972706f727473.`elev` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4333 FROM `default`.`airports` C_64656661756c745f616972706f727473 WHERE ((C_64656661756c745f616972706f727473.`lon` <= (- 1.040500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lon` >= (- 1.110500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lat` >= 4.100000000000000E+001) AND (C_64656661756c745f616972706f727473.`lat` <= 4.500000000000000E+001)) ) C_4954424c ORDER BY C_24 DESC LIMIT 5
State:           CLOSED
Detail:
== Parsed Logical Plan ==
'GlobalLimit 5
+- 'LocalLimit 5
+- 'Sort ['C_24 DESC NULLS LAST], true
+- 'Project ['C_43 AS C_21#5078, 'C_0 AS C_12#5079, 'C_1 AS C_17#5080, 'C_4331 AS C_16#5081, 'C_4332 AS C_25#5082, 'C_4333 AS C_23#5083, 'C_4 AS C_19#5084, 'C_5 AS C_20#5085, 'C_7 AS C_14#5086, 'C_8 AS C_18#5087, 'C_9 AS C_22#5088, 'C_11 AS C_15#5089, 'C_10 AS C_13#5090, 'C_3 AS C_24#5091]
+- 'SubqueryAlias C_4954424c
+- 'Project ['C_64656661756c745f616972706f727473.id AS C_43#5062, 'C_64656661756c745f616972706f727473.type AS C_0#5063, 'C_64656661756c745f616972706f727473.name AS C_1#5064, 'C_64656661756c745f616972706f727473.lat AS C_2#5065, 'C_64656661756c745f616972706f727473.lon AS C_6#5066, 'C_64656661756c745f616972706f727473.elev AS C_3#5067, 'C_64656661756c745f616972706f727473.continent AS C_4#5068, 'C_64656661756c745f616972706f727473.country AS C_5#5069, 'C_64656661756c745f616972706f727473.region AS C_7#5070, 'C_64656661756c745f616972706f727473.city AS C_8#5071, 'C_64656661756c745f616972706f727473.iata AS C_9#5072, 'C_64656661756c745f616972706f727473.code AS C_11#5073, 'C_64656661756c745f616972706f727473.gps AS C_10#5074, ('round(('C_64656661756c745f616972706f727473.lat * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4331#5075, ('round(('C_64656661756c745f616972706f727473.lon * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4332#5076, ('round(('C_64656661756c745f616972706f727473.elev * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4333#5077]
+- 'Filter ((('C_64656661756c745f616972706f727473.lon <= -104.05) AND ('C_64656661756c745f616972706f727473.lon >= -111.05)) AND (('C_64656661756c745f616972706f727473.lat >= 41.0) AND ('C_64656661756c745f616972706f727473.lat <= 45.0)))
+- 'SubqueryAlias C_64656661756c745f616972706f727473
+- 'UnresolvedRelation [default, airports], [], false
== Analyzed Logical Plan ==
C_21: string, C_12: string, C_17: string, C_16: double, C_25: double, C_23: double, C_19: string, C_20: string, C_14: string, C_18: string, C_22: string, C_15: string, C_13: string, C_24: double
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_24#5091 DESC NULLS LAST], true
+- Project [C_43#5062 AS C_21#5078, C_0#5063 AS C_12#5079, C_1#5064 AS C_17#5080, C_4331#5075 AS C_16#5081, C_4332#5076 AS C_25#5082, C_4333#5077 AS C_23#5083, C_4#5068 AS C_19#5084, C_5#5069 AS C_20#5085, C_7#5070 AS C_14#5086, C_8#5071 AS C_18#5087, C_9#5072 AS C_22#5088, C_11#5073 AS C_15#5089, C_10#5074 AS C_13#5090, C_3#5067 AS C_24#5091]
+- SubqueryAlias C_4954424c
+- Project [id#5092 AS C_43#5062, type#5093 AS C_0#5063, name#5094 AS C_1#5064, lat#5095 AS C_2#5065, lon#5096 AS C_6#5066, elev#5097 AS C_3#5067, continent#5098 AS C_4#5068, country#5099 AS C_5#5069, region#5100 AS C_7#5070, city#5101 AS C_8#5071, iata#5102 AS C_9#5072, code#5103 AS C_11#5073, gps#5104 AS C_10#5074, (round((lat#5095 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4331#5075, (round((lon#5096 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4332#5076, (round((elev#5097 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4333#5077]
+- Filter (((lon#5096 <= -104.05) AND (lon#5096 >= -111.05)) AND ((lat#5095 >= 41.0) AND (lat#5095 <= 45.0)))
+- SubqueryAlias C_64656661756c745f616972706f727473
+- SubqueryAlias spark_catalog.default.airports
+- Relation spark_catalog.default.airports[id#5092,type#5093,name#5094,lat#5095,lon#5096,elev#5097,continent#5098,country#5099,region#5100,city#5101,iata#5102,code#5103,gps#5104] parquet
== Optimized Logical Plan ==
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_24#5091 DESC NULLS LAST], true
+- Project [id#5092 AS C_21#5078, type#5093 AS C_12#5079, name#5094 AS C_17#5080, (round((lat#5095 * 1000.0), 0) / 1000.0) AS C_16#5081, (round((lon#5096 * 1000.0), 0) / 1000.0) AS C_25#5082, (round((elev#5097 * 1000.0), 0) / 1000.0) AS C_23#5083, continent#5098 AS C_19#5084, country#5099 AS C_20#5085, region#5100 AS C_14#5086, city#5101 AS C_18#5087, iata#5102 AS C_22#5088, code#5103 AS C_15#5089, gps#5104 AS C_13#5090, elev#5097 AS C_24#5091]
+- Filter ((isnotnull(lon#5096) AND isnotnull(lat#5095)) AND (((lon#5096 <= -104.05) AND (lon#5096 >= -111.05)) AND ((lat#5095 >= 41.0) AND (lat#5095 <= 45.0))))
+- Relation spark_catalog.default.airports[id#5092,type#5093,name#5094,lat#5095,lon#5096,elev#5097,continent#5098,country#5099,region#5100,city#5101,iata#5102,code#5103,gps#5104] parquet
== Physical Plan ==
TakeOrderedAndProject(limit=5, orderBy=[C_24#5091 DESC NULLS LAST], output=[C_21#5078,C_12#5079,C_17#5080,C_16#5081,C_25#5082,C_23#5083,C_19#5084,C_20#5085,C_14#5086,C_18#5087,C_22#5088,C_15#5089,C_13#5090,C_24#5091])
+- *(1) Project [id#5092 AS C_21#5078, type#5093 AS C_12#5079, name#5094 AS C_17#5080, (round((lat#5095 * 1000.0), 0) / 1000.0) AS C_16#5081, (round((lon#5096 * 1000.0), 0) / 1000.0) AS C_25#5082, (round((elev#5097 * 1000.0), 0) / 1000.0) AS C_23#5083, continent#5098 AS C_19#5084, country#5099 AS C_20#5085, region#5100 AS C_14#5086, city#5101 AS C_18#5087, iata#5102 AS C_22#5088, code#5103 AS C_15#5089, gps#5104 AS C_13#5090, elev#5097 AS C_24#5091]
+- *(1) Filter (((((isnotnull(lon#5096) AND isnotnull(lat#5095)) AND (lon#5096 <= -104.05)) AND (lon#5096 >= -111.05)) AND (lat#5095 >= 41.0)) AND (lat#5095 <= 45.0))
+- *(1) ColumnarToRow
+- FileScan parquet spark_catalog.default.airports[id#5092,type#5093,name#5094,lat#5095,lon#5096,elev#5097,continent#5098,country#5099,region#5100,city#5101,iata#5102,code#5103,gps#5104] Batched: true, DataFilters: [isnotnull(lon#5096), isnotnull(lat#5095), (lon#5096 <= -104.05), (lon#5096 >= -111.05), (lat#509..., Format: Parquet, Location: InMemoryFileIndex(1 paths)[file:/home/acdcadmin/spark-warehouse/airports], PartitionFilters: [], PushedFilters: [IsNotNull(lon), IsNotNull(lat), LessThanOrEqual(lon,-104.05), GreaterThanOrEqual(lon,-111.05), G..., ReadSchema: struct<id:string,type:string,name:string,lat:double,lon:double,elev:double,continent:string,count...
|
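The SELECT statements in this log are machine-generated: every column is renamed to an opaque C_n alias, and the coordinate columns are truncated through round(col * power(10, 3), 0) / power(10, 3), which is effectively round(col, 3) written out. A minimal hand-written Spark SQL equivalent of the query above, assuming the same default.airports table, is:

  SELECT id, type, name,
         round(lat, 3)  AS lat_3,   -- same as round(lat * power(10, 3), 0) / power(10, 3)
         round(lon, 3)  AS lon_3,
         round(elev, 3) AS elev_3,
         continent, country, region, city, iata, code, gps
  FROM default.airports
  WHERE lon BETWEEN -111.05 AND -104.05   -- the bounding box from the Filter node
    AND lat BETWEEN 41.0 AND 45.0
  ORDER BY elev DESC                      -- C_24 resolves to elev in the analyzed plan
  LIMIT 5;

Note that ORDER BY ... LIMIT 5 compiles to TakeOrderedAndProject in the physical plan: a per-partition top-5 followed by a merge, not a full sort.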
jonathan
|
|
131f0bc6-d31c-4dfe-9843-c76a400cfec6
|
2025/06/14 00:24:01
|
2025/06/14 00:24:01
|
2025/06/14 00:24:01
|
74 ms
|
343 ms
|
DESCRIBE TABLE `default`.`alltypes`
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#4361, data_type#4362, comment#4363]
+- 'UnresolvedTableOrView [default, alltypes], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#4361, data_type#4362, comment#4363]
== Optimized Logical Plan ==
CommandResult [col_name#4361, data_type#4362, comment#4363], Execute DescribeTableCommand, [[STRING,string,null], [DOUBLE,double,null], [INTEGER,int,null], [BIGINT,bigint,null], [FLOAT,float,null], [DECIMAL,decimal(10,2),null], [NUMBER,decimal(10,2),null], [BOOLEAN,boolean,null], [DATE,date,null], [TIMESTAMP,timestamp,null], [DATETIME,timestamp,null], [BINARY,binary,null], [ARRAY,array<int>,null], [MAP,map<string,string>,null], [STRUCT,struct<field1:string,field2:int>,null], [VARCHAR,string,null], [CHAR,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#4361, data_type#4362, comment#4363]
== Physical Plan ==
CommandResult [col_name#4361, data_type#4362, comment#4363]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#4361, data_type#4362, comment#4363]
|
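The alltypes probes show the server's type mapping as reported by DESCRIBE: VARCHAR and CHAR both surface as string, NUMBER as decimal(10,2), and DATETIME as timestamp. The three-column result (col_name, data_type, comment) can be reproduced directly, assuming the table still exists as logged:

  DESCRIBE TABLE `default`.`alltypes`;
  -- col_name | data_type                        | comment
  -- STRING   | string                           | null
  -- DECIMAL  | decimal(10,2)                    | null
  -- ARRAY    | array<int>                       | null
  -- STRUCT   | struct<field1:string,field2:int> | null
  -- ... 17 rows in total, per the CommandResult above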
jonathan
|
|
133f0798-553e-4b5f-ad3c-a91bc9bcf2b6
|
2025/06/14 00:24:01
|
2025/06/14 00:24:02
|
2025/06/14 00:24:02
|
68 ms
|
343 ms
|
DESCRIBE TABLE `default`.`alltypes`
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#4388, data_type#4389, comment#4390]
+- 'UnresolvedTableOrView [default, alltypes], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#4388, data_type#4389, comment#4390]
== Optimized Logical Plan ==
CommandResult [col_name#4388, data_type#4389, comment#4390], Execute DescribeTableCommand, [[STRING,string,null], [DOUBLE,double,null], [INTEGER,int,null], [BIGINT,bigint,null], [FLOAT,float,null], [DECIMAL,decimal(10,2),null], [NUMBER,decimal(10,2),null], [BOOLEAN,boolean,null], [DATE,date,null], [TIMESTAMP,timestamp,null], [DATETIME,timestamp,null], [BINARY,binary,null], [ARRAY,array<int>,null], [MAP,map<string,string>,null], [STRUCT,struct<field1:string,field2:int>,null], [VARCHAR,string,null], [CHAR,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#4388, data_type#4389, comment#4390]
== Physical Plan ==
CommandResult [col_name#4388, data_type#4389, comment#4390]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#4388, data_type#4389, comment#4390]
|
jonathan
|
|
14fe4125-ce7b-47ec-8eae-1f27d268514f
|
2025/06/13 23:27:01
|
2025/06/13 23:27:01
|
2025/06/13 23:27:02
|
28 ms
|
364 ms
|
Listing databases 'catalog : , schemaPattern : null'
|
CLOSED
|
|
jonathon
|
|
161d8f56-6f26-4534-8c1a-6823f620d0a1
|
2025/06/13 22:37:47
|
2025/06/13 22:37:48
|
2025/06/13 22:37:48
|
88 ms
|
184 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#2621, data_type#2622, comment#2623]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2621, data_type#2622, comment#2623]
== Optimized Logical Plan ==
CommandResult [col_name#2621, data_type#2622, comment#2623], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2621, data_type#2622, comment#2623]
== Physical Plan ==
CommandResult [col_name#2621, data_type#2622, comment#2623]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2621, data_type#2622, comment#2623]
|
jonathon
|
|
16d23e89-d3c2-4a47-bcec-a04ae65919ba
|
2025/06/14 06:31:58
|
2025/06/14 06:31:58
|
2025/06/14 06:31:58
|
86 ms
|
182 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#5160, data_type#5161, comment#5162]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5160, data_type#5161, comment#5162]
== Optimized Logical Plan ==
CommandResult [col_name#5160, data_type#5161, comment#5162], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5160, data_type#5161, comment#5162]
== Physical Plan ==
CommandResult [col_name#5160, data_type#5161, comment#5162]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5160, data_type#5161, comment#5162]
|
jonathon
|
[38]
|
17b645b6-20fb-40fb-bd88-8bff22d539bd
|
2025/06/13 23:20:56
|
2025/06/13 23:20:56
|
2025/06/13 23:20:56
|
185 ms
|
283 ms
|
SELECT C_0 AS C_13, C_43 AS C_14, C_5 AS C_15, C_4331 AS C_16, C_4332 AS C_12, C_4333 AS C_17, C_1 AS C_18, C_6 AS C_21, C_7 AS C_19, C_8 AS C_20, C_10 AS C_23, C_9 AS C_24, C_11 AS C_25, C_4 AS C_22 FROM (SELECT C_64656661756c745f616972706f727473.`id` AS C_0, C_64656661756c745f616972706f727473.`type` AS C_43, C_64656661756c745f616972706f727473.`name` AS C_5, C_64656661756c745f616972706f727473.`lat` AS C_3, C_64656661756c745f616972706f727473.`lon` AS C_2, C_64656661756c745f616972706f727473.`elev` AS C_4, C_64656661756c745f616972706f727473.`continent` AS C_1, C_64656661756c745f616972706f727473.`country` AS C_6, C_64656661756c745f616972706f727473.`region` AS C_7, C_64656661756c745f616972706f727473.`city` AS C_8, C_64656661756c745f616972706f727473.`iata` AS C_10, C_64656661756c745f616972706f727473.`code` AS C_9, C_64656661756c745f616972706f727473.`gps` AS C_11, (round((C_64656661756c745f616972706f727473.`lat` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4331, (round((C_64656661756c745f616972706f727473.`lon` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4332, (round((C_64656661756c745f616972706f727473.`elev` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4333 FROM `default`.`airports` C_64656661756c745f616972706f727473 WHERE ((C_64656661756c745f616972706f727473.`lon` <= (- 1.040500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lon` >= (- 1.110500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lat` >= 4.100000000000000E+001) AND (C_64656661756c745f616972706f727473.`lat` <= 4.500000000000000E+001)) ) C_4954424c ORDER BY C_22 DESC LIMIT 5
|
CLOSED
|
== Parsed Logical Plan ==
'GlobalLimit 5
+- 'LocalLimit 5
+- 'Sort ['C_22 DESC NULLS LAST], true
+- 'Project ['C_0 AS C_13#3323, 'C_43 AS C_14#3324, 'C_5 AS C_15#3325, 'C_4331 AS C_16#3326, 'C_4332 AS C_12#3327, 'C_4333 AS C_17#3328, 'C_1 AS C_18#3329, 'C_6 AS C_21#3330, 'C_7 AS C_19#3331, 'C_8 AS C_20#3332, 'C_10 AS C_23#3333, 'C_9 AS C_24#3334, 'C_11 AS C_25#3335, 'C_4 AS C_22#3336]
+- 'SubqueryAlias C_4954424c
+- 'Project ['C_64656661756c745f616972706f727473.id AS C_0#3307, 'C_64656661756c745f616972706f727473.type AS C_43#3308, 'C_64656661756c745f616972706f727473.name AS C_5#3309, 'C_64656661756c745f616972706f727473.lat AS C_3#3310, 'C_64656661756c745f616972706f727473.lon AS C_2#3311, 'C_64656661756c745f616972706f727473.elev AS C_4#3312, 'C_64656661756c745f616972706f727473.continent AS C_1#3313, 'C_64656661756c745f616972706f727473.country AS C_6#3314, 'C_64656661756c745f616972706f727473.region AS C_7#3315, 'C_64656661756c745f616972706f727473.city AS C_8#3316, 'C_64656661756c745f616972706f727473.iata AS C_10#3317, 'C_64656661756c745f616972706f727473.code AS C_9#3318, 'C_64656661756c745f616972706f727473.gps AS C_11#3319, ('round(('C_64656661756c745f616972706f727473.lat * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4331#3320, ('round(('C_64656661756c745f616972706f727473.lon * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4332#3321, ('round(('C_64656661756c745f616972706f727473.elev * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4333#3322]
+- 'Filter ((('C_64656661756c745f616972706f727473.lon <= -104.05) AND ('C_64656661756c745f616972706f727473.lon >= -111.05)) AND (('C_64656661756c745f616972706f727473.lat >= 41.0) AND ('C_64656661756c745f616972706f727473.lat <= 45.0)))
+- 'SubqueryAlias C_64656661756c745f616972706f727473
+- 'UnresolvedRelation [default, airports], [], false
== Analyzed Logical Plan ==
C_13: string, C_14: string, C_15: string, C_16: double, C_12: double, C_17: double, C_18: string, C_21: string, C_19: string, C_20: string, C_23: string, C_24: string, C_25: string, C_22: double
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_22#3336 DESC NULLS LAST], true
+- Project [C_0#3307 AS C_13#3323, C_43#3308 AS C_14#3324, C_5#3309 AS C_15#3325, C_4331#3320 AS C_16#3326, C_4332#3321 AS C_12#3327, C_4333#3322 AS C_17#3328, C_1#3313 AS C_18#3329, C_6#3314 AS C_21#3330, C_7#3315 AS C_19#3331, C_8#3316 AS C_20#3332, C_10#3317 AS C_23#3333, C_9#3318 AS C_24#3334, C_11#3319 AS C_25#3335, C_4#3312 AS C_22#3336]
+- SubqueryAlias C_4954424c
+- Project [id#3337 AS C_0#3307, type#3338 AS C_43#3308, name#3339 AS C_5#3309, lat#3340 AS C_3#3310, lon#3341 AS C_2#3311, elev#3342 AS C_4#3312, continent#3343 AS C_1#3313, country#3344 AS C_6#3314, region#3345 AS C_7#3315, city#3346 AS C_8#3316, iata#3347 AS C_10#3317, code#3348 AS C_9#3318, gps#3349 AS C_11#3319, (round((lat#3340 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4331#3320, (round((lon#3341 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4332#3321, (round((elev#3342 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4333#3322]
+- Filter (((lon#3341 <= -104.05) AND (lon#3341 >= -111.05)) AND ((lat#3340 >= 41.0) AND (lat#3340 <= 45.0)))
+- SubqueryAlias C_64656661756c745f616972706f727473
+- SubqueryAlias spark_catalog.default.airports
+- Relation spark_catalog.default.airports[id#3337,type#3338,name#3339,lat#3340,lon#3341,elev#3342,continent#3343,country#3344,region#3345,city#3346,iata#3347,code#3348,gps#3349] parquet
== Optimized Logical Plan ==
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_22#3336 DESC NULLS LAST], true
+- Project [id#3337 AS C_13#3323, type#3338 AS C_14#3324, name#3339 AS C_15#3325, (round((lat#3340 * 1000.0), 0) / 1000.0) AS C_16#3326, (round((lon#3341 * 1000.0), 0) / 1000.0) AS C_12#3327, (round((elev#3342 * 1000.0), 0) / 1000.0) AS C_17#3328, continent#3343 AS C_18#3329, country#3344 AS C_21#3330, region#3345 AS C_19#3331, city#3346 AS C_20#3332, iata#3347 AS C_23#3333, code#3348 AS C_24#3334, gps#3349 AS C_25#3335, elev#3342 AS C_22#3336]
+- Filter ((isnotnull(lon#3341) AND isnotnull(lat#3340)) AND (((lon#3341 <= -104.05) AND (lon#3341 >= -111.05)) AND ((lat#3340 >= 41.0) AND (lat#3340 <= 45.0))))
+- Relation spark_catalog.default.airports[id#3337,type#3338,name#3339,lat#3340,lon#3341,elev#3342,continent#3343,country#3344,region#3345,city#3346,iata#3347,code#3348,gps#3349] parquet
== Physical Plan ==
TakeOrderedAndProject(limit=5, orderBy=[C_22#3336 DESC NULLS LAST], output=[C_13#3323,C_14#3324,C_15#3325,C_16#3326,C_12#3327,C_17#3328,C_18#3329,C_21#3330,C_19#3331,C_20#3332,C_23#3333,C_24#3334,C_25#3335,C_22#3336])
+- *(1) Project [id#3337 AS C_13#3323, type#3338 AS C_14#3324, name#3339 AS C_15#3325, (round((lat#3340 * 1000.0), 0) / 1000.0) AS C_16#3326, (round((lon#3341 * 1000.0), 0) / 1000.0) AS C_12#3327, (round((elev#3342 * 1000.0), 0) / 1000.0) AS C_17#3328, continent#3343 AS C_18#3329, country#3344 AS C_21#3330, region#3345 AS C_19#3331, city#3346 AS C_20#3332, iata#3347 AS C_23#3333, code#3348 AS C_24#3334, gps#3349 AS C_25#3335, elev#3342 AS C_22#3336]
+- *(1) Filter (((((isnotnull(lon#3341) AND isnotnull(lat#3340)) AND (lon#3341 <= -104.05)) AND (lon#3341 >= -111.05)) AND (lat#3340 >= 41.0)) AND (lat#3340 <= 45.0))
+- *(1) ColumnarToRow
+- FileScan parquet spark_catalog.default.airports[id#3337,type#3338,name#3339,lat#3340,lon#3341,elev#3342,continent#3343,country#3344,region#3345,city#3346,iata#3347,code#3348,gps#3349] Batched: true, DataFilters: [isnotnull(lon#3341), isnotnull(lat#3340), (lon#3341 <= -104.05), (lon#3341 >= -111.05), (lat#334..., Format: Parquet, Location: InMemoryFileIndex(1 paths)[file:/home/acdcadmin/spark-warehouse/airports], PartitionFilters: [], PushedFilters: [IsNotNull(lon), IsNotNull(lat), LessThanOrEqual(lon,-104.05), GreaterThanOrEqual(lon,-111.05), G..., ReadSchema: struct<id:string,type:string,name:string,lat:double,lon:double,elev:double,continent:string,count...
|
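The opaque identifiers in these generated queries are hex-encoded names: the table alias C_64656661756c745f616972706f727473 is the UTF-8 hex of default_airports, and the subquery alias C_4954424c decodes to ITBL. This can be checked from Spark SQL itself:

  SELECT decode(unhex('64656661756c745f616972706f727473'), 'UTF-8');  -- default_airports
  SELECT decode(unhex('4954424c'), 'UTF-8');                          -- ITBL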
jonathan
|
|
1b050a71-1141-4469-a1b5-a1b8be6eaf70
|
2025/06/13 23:34:51
|
2025/06/13 23:34:51
|
2025/06/13 23:34:51
|
33 ms
|
343 ms
|
SHOW TABLES IN `onetableschema`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#4098, tableName#4099, isTemporary#4100]
+- 'UnresolvedNamespace [onetableschema]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#4098, tableName#4099, isTemporary#4100]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [onetableschema]
== Optimized Logical Plan ==
CommandResult [namespace#4098, tableName#4099, isTemporary#4100], ShowTables [namespace#4098, tableName#4099, isTemporary#4100], V2SessionCatalog(spark_catalog), [onetableschema], [[0,200000000e,300000000c,0,656c626174656e6f,616d65686373,73657079746c6c61,74736574]]
+- ShowTables [namespace#4098, tableName#4099, isTemporary#4100]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [onetableschema]
== Physical Plan ==
CommandResult [namespace#4098, tableName#4099, isTemporary#4100]
+- ShowTables [namespace#4098, tableName#4099, isTemporary#4100], V2SessionCatalog(spark_catalog), [onetableschema]
|
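For metadata commands the optimized plan embeds the pre-computed result rows in the CommandResult node; the bracketed hex words are the serialized row bytes (they appear to be little-endian 64-bit words, and decode here to the namespace onetableschema and the table alltypestest). The client-visible form of the same call is simply:

  SHOW TABLES IN `onetableschema`;
  -- namespace      | tableName    | isTemporary
  -- onetableschema | alltypestest | false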
jonathon
|
[37]
|
1f24d235-f05b-48c7-98cd-daab7569b935
|
2025/06/13 22:44:56
|
2025/06/13 22:44:56
|
2025/06/13 22:44:56
|
187 ms
|
288 ms
|
SELECT C_9 AS C_25, C_2 AS C_22, C_7 AS C_19, C_4331 AS C_16, C_4332 AS C_18, C_4333 AS C_13, C_6 AS C_21, C_8 AS C_20, C_43 AS C_17, C_0 AS C_14, C_1 AS C_12, C_11 AS C_24, C_10 AS C_15, C_5 AS C_23 FROM (SELECT C_64656661756c745f616972706f727473.`id` AS C_9, C_64656661756c745f616972706f727473.`type` AS C_2, C_64656661756c745f616972706f727473.`name` AS C_7, C_64656661756c745f616972706f727473.`lat` AS C_3, C_64656661756c745f616972706f727473.`lon` AS C_4, C_64656661756c745f616972706f727473.`elev` AS C_5, C_64656661756c745f616972706f727473.`continent` AS C_6, C_64656661756c745f616972706f727473.`country` AS C_8, C_64656661756c745f616972706f727473.`region` AS C_43, C_64656661756c745f616972706f727473.`city` AS C_0, C_64656661756c745f616972706f727473.`iata` AS C_1, C_64656661756c745f616972706f727473.`code` AS C_11, C_64656661756c745f616972706f727473.`gps` AS C_10, (round((C_64656661756c745f616972706f727473.`lat` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4331, (round((C_64656661756c745f616972706f727473.`lon` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4332, (round((C_64656661756c745f616972706f727473.`elev` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4333 FROM `default`.`airports` C_64656661756c745f616972706f727473 WHERE ((C_64656661756c745f616972706f727473.`lon` <= (- 1.040500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lon` >= (- 1.110500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lat` >= 4.100000000000000E+001) AND (C_64656661756c745f616972706f727473.`lat` <= 4.500000000000000E+001)) ) C_4954424c ORDER BY C_23 DESC LIMIT 5
|
CLOSED
|
== Parsed Logical Plan ==
'GlobalLimit 5
+- 'LocalLimit 5
+- 'Sort ['C_23 DESC NULLS LAST], true
+- 'Project ['C_9 AS C_25#2884, 'C_2 AS C_22#2885, 'C_7 AS C_19#2886, 'C_4331 AS C_16#2887, 'C_4332 AS C_18#2888, 'C_4333 AS C_13#2889, 'C_6 AS C_21#2890, 'C_8 AS C_20#2891, 'C_43 AS C_17#2892, 'C_0 AS C_14#2893, 'C_1 AS C_12#2894, 'C_11 AS C_24#2895, 'C_10 AS C_15#2896, 'C_5 AS C_23#2897]
+- 'SubqueryAlias C_4954424c
+- 'Project ['C_64656661756c745f616972706f727473.id AS C_9#2868, 'C_64656661756c745f616972706f727473.type AS C_2#2869, 'C_64656661756c745f616972706f727473.name AS C_7#2870, 'C_64656661756c745f616972706f727473.lat AS C_3#2871, 'C_64656661756c745f616972706f727473.lon AS C_4#2872, 'C_64656661756c745f616972706f727473.elev AS C_5#2873, 'C_64656661756c745f616972706f727473.continent AS C_6#2874, 'C_64656661756c745f616972706f727473.country AS C_8#2875, 'C_64656661756c745f616972706f727473.region AS C_43#2876, 'C_64656661756c745f616972706f727473.city AS C_0#2877, 'C_64656661756c745f616972706f727473.iata AS C_1#2878, 'C_64656661756c745f616972706f727473.code AS C_11#2879, 'C_64656661756c745f616972706f727473.gps AS C_10#2880, ('round(('C_64656661756c745f616972706f727473.lat * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4331#2881, ('round(('C_64656661756c745f616972706f727473.lon * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4332#2882, ('round(('C_64656661756c745f616972706f727473.elev * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4333#2883]
+- 'Filter ((('C_64656661756c745f616972706f727473.lon <= -104.05) AND ('C_64656661756c745f616972706f727473.lon >= -111.05)) AND (('C_64656661756c745f616972706f727473.lat >= 41.0) AND ('C_64656661756c745f616972706f727473.lat <= 45.0)))
+- 'SubqueryAlias C_64656661756c745f616972706f727473
+- 'UnresolvedRelation [default, airports], [], false
== Analyzed Logical Plan ==
C_25: string, C_22: string, C_19: string, C_16: double, C_18: double, C_13: double, C_21: string, C_20: string, C_17: string, C_14: string, C_12: string, C_24: string, C_15: string, C_23: double
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_23#2897 DESC NULLS LAST], true
+- Project [C_9#2868 AS C_25#2884, C_2#2869 AS C_22#2885, C_7#2870 AS C_19#2886, C_4331#2881 AS C_16#2887, C_4332#2882 AS C_18#2888, C_4333#2883 AS C_13#2889, C_6#2874 AS C_21#2890, C_8#2875 AS C_20#2891, C_43#2876 AS C_17#2892, C_0#2877 AS C_14#2893, C_1#2878 AS C_12#2894, C_11#2879 AS C_24#2895, C_10#2880 AS C_15#2896, C_5#2873 AS C_23#2897]
+- SubqueryAlias C_4954424c
+- Project [id#2898 AS C_9#2868, type#2899 AS C_2#2869, name#2900 AS C_7#2870, lat#2901 AS C_3#2871, lon#2902 AS C_4#2872, elev#2903 AS C_5#2873, continent#2904 AS C_6#2874, country#2905 AS C_8#2875, region#2906 AS C_43#2876, city#2907 AS C_0#2877, iata#2908 AS C_1#2878, code#2909 AS C_11#2879, gps#2910 AS C_10#2880, (round((lat#2901 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4331#2881, (round((lon#2902 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4332#2882, (round((elev#2903 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4333#2883]
+- Filter (((lon#2902 <= -104.05) AND (lon#2902 >= -111.05)) AND ((lat#2901 >= 41.0) AND (lat#2901 <= 45.0)))
+- SubqueryAlias C_64656661756c745f616972706f727473
+- SubqueryAlias spark_catalog.default.airports
+- Relation spark_catalog.default.airports[id#2898,type#2899,name#2900,lat#2901,lon#2902,elev#2903,continent#2904,country#2905,region#2906,city#2907,iata#2908,code#2909,gps#2910] parquet
== Optimized Logical Plan ==
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_23#2897 DESC NULLS LAST], true
+- Project [id#2898 AS C_25#2884, type#2899 AS C_22#2885, name#2900 AS C_19#2886, (round((lat#2901 * 1000.0), 0) / 1000.0) AS C_16#2887, (round((lon#2902 * 1000.0), 0) / 1000.0) AS C_18#2888, (round((elev#2903 * 1000.0), 0) / 1000.0) AS C_13#2889, continent#2904 AS C_21#2890, country#2905 AS C_20#2891, region#2906 AS C_17#2892, city#2907 AS C_14#2893, iata#2908 AS C_12#2894, code#2909 AS C_24#2895, gps#2910 AS C_15#2896, elev#2903 AS C_23#2897]
+- Filter ((isnotnull(lon#2902) AND isnotnull(lat#2901)) AND (((lon#2902 <= -104.05) AND (lon#2902 >= -111.05)) AND ((lat#2901 >= 41.0) AND (lat#2901 <= 45.0))))
+- Relation spark_catalog.default.airports[id#2898,type#2899,name#2900,lat#2901,lon#2902,elev#2903,continent#2904,country#2905,region#2906,city#2907,iata#2908,code#2909,gps#2910] parquet
== Physical Plan ==
TakeOrderedAndProject(limit=5, orderBy=[C_23#2897 DESC NULLS LAST], output=[C_25#2884,C_22#2885,C_19#2886,C_16#2887,C_18#2888,C_13#2889,C_21#2890,C_20#2891,C_17#2892,C_14#2893,C_12#2894,C_24#2895,C_15#2896,C_23#2897])
+- *(1) Project [id#2898 AS C_25#2884, type#2899 AS C_22#2885, name#2900 AS C_19#2886, (round((lat#2901 * 1000.0), 0) / 1000.0) AS C_16#2887, (round((lon#2902 * 1000.0), 0) / 1000.0) AS C_18#2888, (round((elev#2903 * 1000.0), 0) / 1000.0) AS C_13#2889, continent#2904 AS C_21#2890, country#2905 AS C_20#2891, region#2906 AS C_17#2892, city#2907 AS C_14#2893, iata#2908 AS C_12#2894, code#2909 AS C_24#2895, gps#2910 AS C_15#2896, elev#2903 AS C_23#2897]
+- *(1) Filter (((((isnotnull(lon#2902) AND isnotnull(lat#2901)) AND (lon#2902 <= -104.05)) AND (lon#2902 >= -111.05)) AND (lat#2901 >= 41.0)) AND (lat#2901 <= 45.0))
+- *(1) ColumnarToRow
+- FileScan parquet spark_catalog.default.airports[id#2898,type#2899,name#2900,lat#2901,lon#2902,elev#2903,continent#2904,country#2905,region#2906,city#2907,iata#2908,code#2909,gps#2910] Batched: true, DataFilters: [isnotnull(lon#2902), isnotnull(lat#2901), (lon#2902 <= -104.05), (lon#2902 >= -111.05), (lat#290..., Format: Parquet, Location: InMemoryFileIndex(1 paths)[file:/home/acdcadmin/spark-warehouse/airports], PartitionFilters: [], PushedFilters: [IsNotNull(lon), IsNotNull(lat), LessThanOrEqual(lon,-104.05), GreaterThanOrEqual(lon,-111.05), G..., ReadSchema: struct<id:string,type:string,name:string,lat:double,lon:double,elev:double,continent:string,count...
|
jonathon
|
|
2013b900-8b91-46bc-8f52-ea83e6bc1e23
|
2025/06/14 06:31:57
|
2025/06/14 06:31:57
|
2025/06/14 06:31:58
|
35 ms
|
177 ms
|
set -v
|
CLOSED
|
== Parsed Logical Plan ==
SetCommand (-v,None)
== Analyzed Logical Plan ==
key: string, value: string, meaning: string, Since version: string
SetCommand (-v,None)
== Optimized Logical Plan ==
CommandResult [key#5135, value#5136, meaning#5137, Since version#5138], Execute SetCommand, [[spark.sql.adaptive.advisoryPartitionSizeInBytes,<value of spark.sql.adaptive.shuffle.targetPostShuffleInputSize>,The advisory size in bytes of the shuffle partition during adaptive optimization (when spark.sql.adaptive.enabled is true). It takes effect when Spark coalesces small shuffle partitions or splits skewed shuffle partition.,3.0.0], [spark.sql.adaptive.autoBroadcastJoinThreshold,<undefined>,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. The default value is same with spark.sql.autoBroadcastJoinThreshold. Note that, this config is used only in adaptive framework.,3.2.0], [spark.sql.adaptive.coalescePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will coalesce contiguous shuffle partitions according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid too many small tasks.,3.0.0], [spark.sql.adaptive.coalescePartitions.initialPartitionNum,<undefined>,The initial number of shuffle partitions before coalescing. If not set, it equals to spark.sql.shuffle.partitions. This configuration only has an effect when 'spark.sql.adaptive.enabled' and 'spark.sql.adaptive.coalescePartitions.enabled' are both true.,3.0.0], [spark.sql.adaptive.coalescePartitions.minPartitionSize,1MB,The minimum size of shuffle partitions after coalescing. This is useful when the adaptively calculated target size is too small during partition coalescing.,3.2.0], [spark.sql.adaptive.coalescePartitions.parallelismFirst,true,When true, Spark does not respect the target size specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes' (default 64MB) when coalescing contiguous shuffle partitions, but adaptively calculate the target size according to the default parallelism of the Spark cluster. The calculated size is usually smaller than the configured target size. This is to maximize the parallelism and avoid performance regression when enabling adaptive query execution. It's recommended to set this config to false and respect the configured target size.,3.2.0], [spark.sql.adaptive.customCostEvaluatorClass,<undefined>,The custom cost evaluator class to be used for adaptive execution. If not being set, Spark will use its own SimpleCostEvaluator by default.,3.2.0], [spark.sql.adaptive.enabled,true,When true, enable adaptive query execution, which re-optimizes the query plan in the middle of query execution, based on accurate runtime statistics.,1.6.0], [spark.sql.adaptive.forceOptimizeSkewedJoin,false,When true, force enable OptimizeSkewedJoin even if it introduces extra shuffle.,3.3.0], [spark.sql.adaptive.localShuffleReader.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark tries to use local shuffle reader to read the shuffle data when the shuffle partitioning is not needed, for example, after converting sort-merge join to broadcast-hash join.,3.0.0], [spark.sql.adaptive.maxShuffledHashJoinLocalMapThreshold,0b,Configures the maximum size in bytes per partition that can be allowed to build local hash map. 
If this value is not smaller than spark.sql.adaptive.advisoryPartitionSizeInBytes and all the partition size are not larger than this config, join selection prefer to use shuffled hash join instead of sort merge join regardless of the value of spark.sql.join.preferSortMergeJoin.,3.2.0], [spark.sql.adaptive.optimizeSkewsInRebalancePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will optimize the skewed shuffle partitions in RebalancePartitions and split them to smaller ones according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid data skew.,3.2.0], [spark.sql.adaptive.optimizer.excludedRules,<undefined>,Configures a list of rules to be disabled in the adaptive optimizer, in which the rules are specified by their rule names and separated by comma. The optimizer will log the rules that have indeed been excluded.,3.1.0], [spark.sql.adaptive.rebalancePartitionsSmallPartitionFactor,0.2,A partition will be merged during splitting if its size is small than this factor multiply spark.sql.adaptive.advisoryPartitionSizeInBytes.,3.3.0], [spark.sql.adaptive.skewJoin.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark dynamically handles skew in shuffled join (sort-merge and shuffled hash) by splitting (and replicating if needed) skewed partitions.,3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionFactor,5.0,A partition is considered as skewed if its size is larger than this factor multiplying the median partition size and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes',3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes,256MB,A partition is considered as skewed if its size in bytes is larger than this threshold and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionFactor' multiplying the median partition size. Ideally this config should be set larger than 'spark.sql.adaptive.advisoryPartitionSizeInBytes'.,3.0.0], [spark.sql.allowNamedFunctionArguments,true,If true, Spark will turn on support for named parameters for all functions that has it implemented.,3.5.0], [spark.sql.ansi.doubleQuotedIdentifiers,false,When true and 'spark.sql.ansi.enabled' is true, Spark SQL reads literals enclosed in double quoted (") as identifiers. When false they are read as string literals.,3.4.0], [spark.sql.ansi.enabled,false,When true, Spark SQL uses an ANSI compliant dialect instead of being Hive compliant. For example, Spark will throw an exception at runtime instead of returning null results when the inputs to a SQL operator/function are invalid.For full details of this dialect, you can find them in the section "ANSI Compliance" of Spark's documentation. Some ANSI dialect features may be not from the ANSI SQL standard directly, but their behaviors align with ANSI SQL's style,3.0.0], [spark.sql.ansi.enforceReservedKeywords,false,When true and 'spark.sql.ansi.enabled' is true, the Spark SQL parser enforces the ANSI reserved keywords and forbids SQL queries that use reserved keywords as alias names and/or identifiers for table, view, function, etc.,3.3.0], [spark.sql.ansi.relationPrecedence,false,When true and 'spark.sql.ansi.enabled' is true, JOIN takes precedence over comma when combining relation. For example, `t1, t2 JOIN t3` should result to `t1 X (t2 X t3)`. 
If the config is false, the result is `(t1 X t2) X t3`.,3.4.0], [spark.sql.autoBroadcastJoinThreshold,10MB,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. Note that currently statistics are only supported for Hive Metastore tables where the command `ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan` has been run, and file-based data source tables where the statistics are computed directly on the files of data.,1.1.0], [spark.sql.avro.compression.codec,snappy,Compression codec used in writing of AVRO files. Supported codecs: uncompressed, deflate, snappy, bzip2, xz and zstandard. Default codec is snappy.,2.4.0], ... 183 more fields]
+- SetCommand (-v,None)
== Physical Plan ==
CommandResult [key#5135, value#5136, meaning#5137, Since version#5138]
+- Execute SetCommand
+- SetCommand (-v,None)
|
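set -v is the Spark SQL variant of SET that dumps every SQL configuration with its current value, its documentation string, and the release it first appeared in (the key/value/meaning/Since version columns above). To read or change a single setting, the cheaper forms are:

  SET spark.sql.adaptive.enabled;          -- read one config
  SET spark.sql.adaptive.enabled=false;    -- override it for this session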
jonathan
|
|
206183d3-e6ac-420f-8a9e-1eb86dc07025
|
2025/06/13 23:34:49
|
2025/06/13 23:34:49
|
2025/06/13 23:34:49
|
11 ms
|
663 ms
|
Listing catalogs
|
CLOSED
|
|
jonathan
|
|
215ae1d6-e907-4ea3-a95a-762cd54e7fb6
|
2025/06/13 22:51:52
|
2025/06/13 22:51:52
|
2025/06/13 22:51:52
|
38 ms
|
365 ms
|
SHOW TABLES IN `c3ba675f1fb64660ba4a90155b35924e`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#2941, tableName#2942, isTemporary#2943]
+- 'UnresolvedNamespace [c3ba675f1fb64660ba4a90155b35924e]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#2941, tableName#2942, isTemporary#2943]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [c3ba675f1fb64660ba4a90155b35924e]
== Optimized Logical Plan ==
CommandResult [namespace#2941, tableName#2942, isTemporary#2943], ShowTables [namespace#2941, tableName#2942, isTemporary#2943], V2SessionCatalog(spark_catalog), [c3ba675f1fb64660ba4a90155b35924e], [[0,2000000020,400000000c,0,6635373661623363,3036363436626631,3531303961346162,6534323935336235,69746e656469796d,72656966]]
+- ShowTables [namespace#2941, tableName#2942, isTemporary#2943]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [c3ba675f1fb64660ba4a90155b35924e]
== Physical Plan ==
CommandResult [namespace#2941, tableName#2942, isTemporary#2943]
+- ShowTables [namespace#2941, tableName#2942, isTemporary#2943], V2SessionCatalog(spark_catalog), [c3ba675f1fb64660ba4a90155b35924e]
|
jonathon
|
|
22a79b78-383b-4630-9d21-3f3edc29d332
|
2025/06/14 01:47:47
|
2025/06/14 01:47:48
|
2025/06/14 01:47:48
|
29 ms
|
124 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathan
|
|
23d0bb45-2016-4751-8b44-535843d1a17f
|
2025/06/13 23:21:11
|
2025/06/13 23:21:11
|
2025/06/13 23:21:11
|
49 ms
|
317 ms
|
SHOW TABLES IN `default`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#3400, tableName#3401, isTemporary#3402]
+- 'UnresolvedNamespace [default]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#3400, tableName#3401, isTemporary#3402]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [default]
== Optimized Logical Plan ==
CommandResult [namespace#3400, tableName#3401, isTemporary#3402], ShowTables [namespace#3400, tableName#3401, isTemporary#3402], V2SessionCatalog(spark_catalog), [default], [[0,2000000007,2800000008,0,746c7561666564,7374726f70726961], [0,2000000007,2800000008,0,746c7561666564,73657079746c6c61], [0,2000000007,2800000009,0,746c7561666564,73657079746c6c61,32], [0,2000000007,280000000d,0,746c7561666564,73657079746c6c61,6369736162], [0,2000000007,280000000e,0,746c7561666564,73657079746c6c61,326369736162], [0,2000000007,2800000009,0,746c7561666564,7079747961727261,65], [0,2000000007,280000000a,0,746c7561666564,7974746e69676962,6570], [0,2000000007,280000000a,0,746c7561666564,79747972616e6962,6570], [0,2000000007,2800000008,0,746c7561666564,6570797465746164], [0,2000000007,280000000b,0,746c7561666564,746c616d69636564,657079], [0,2000000007,2800000009,0,746c7561666564,70797474616f6c66,65], [0,2000000007,2800000008,0,746c7561666564,736570797470616d], [0,2000000007,280000000b,0,746c7561666564,646978617463796e,617461], [0,2000000007,280000000f,0,746c7561666564,746978617463796e,61746164706972], [0,2000000007,2800000010,0,746c7561666564,7365745f656d6f73,32656c6261745f74], [0,2000000007,280000000a,0,746c7561666564,7974746375727473,6570], [0,2000000007,280000000e,0,746c7561666564,656e6f7a69786174,70756b6f6f6c], [0,2000000007,280000000c,0,746c7561666564,74676e696b726f77,73657079], [0,2000000007,2800000016,0,746c7561666564,74676e696b726f77,6874697773657079,7265626d756e]]
+- ShowTables [namespace#3400, tableName#3401, isTemporary#3402]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [default]
== Physical Plan ==
CommandResult [namespace#3400, tableName#3401, isTemporary#3402]
+- ShowTables [namespace#3400, tableName#3401, isTemporary#3402], V2SessionCatalog(spark_catalog), [default]
|
jonathon
|
|
24b341de-4aa7-4439-9d38-e87f336daa63
|
2025/06/13 07:55:31
|
2025/06/13 07:55:31
|
2025/06/13 07:55:31
|
49 ms
|
203 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathon
|
|
25c0dc0c-2918-46d9-b142-a73770f16321
|
2025/06/13 23:20:56
|
2025/06/13 23:20:56
|
2025/06/13 23:20:56
|
85 ms
|
182 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#3261, data_type#3262, comment#3263]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#3261, data_type#3262, comment#3263]
== Optimized Logical Plan ==
CommandResult [col_name#3261, data_type#3262, comment#3263], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#3261, data_type#3262, comment#3263]
== Physical Plan ==
CommandResult [col_name#3261, data_type#3262, comment#3263]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#3261, data_type#3262, comment#3263]
|
jonathon
|
|
2641dfe3-8486-4965-b35e-af9a5acad4e5
|
2025/06/13 07:16:52
|
2025/06/13 07:16:52
|
2025/06/13 07:16:52
|
29 ms
|
125 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathan
|
|
2952ca04-a82d-4b6f-bba3-517aa8866d70
|
2025/06/13 22:54:12
|
2025/06/13 22:54:12
|
2025/06/13 22:54:12
|
27 ms
|
361 ms
|
SHOW TABLES IN `test`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#3081, tableName#3082, isTemporary#3083]
+- 'UnresolvedNamespace [test]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#3081, tableName#3082, isTemporary#3083]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [test]
== Optimized Logical Plan ==
CommandResult [namespace#3081, tableName#3082, isTemporary#3083], ShowTables [namespace#3081, tableName#3082, isTemporary#3083], V2SessionCatalog(spark_catalog), [test]
+- ShowTables [namespace#3081, tableName#3082, isTemporary#3083]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [test]
== Physical Plan ==
CommandResult <empty>, [namespace#3081, tableName#3082, isTemporary#3083]
+- ShowTables [namespace#3081, tableName#3082, isTemporary#3083], V2SessionCatalog(spark_catalog), [test]
|
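When the namespace exists but contains no tables, the physical plan reports CommandResult <empty>, as in the `test` run above; the statement itself just returns zero rows:

  SHOW TABLES IN `test`;
  -- (no rows)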
jonathon
|
|
2d9802cf-8cba-4a98-97b3-b0a3f26b46b3
|
2025/06/14 01:46:18
|
2025/06/14 01:46:18
|
2025/06/14 01:46:19
|
59 ms
|
451 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathon
|
|
2e20f6db-4145-4b48-b397-bbea3e2aa67d
|
2025/06/15 06:45:39
|
2025/06/15 06:45:39
|
2025/06/15 06:45:39
|
26 ms
|
179 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathan
|
|
2fe876c5-3a65-4db2-bf8a-7b713edf57cc
|
2025/06/13 23:27:00
|
2025/06/13 23:27:00
|
2025/06/13 23:27:00
|
55 ms
|
322 ms
|
DESCRIBE TABLE `default`.`alltypes`
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#3701, data_type#3702, comment#3703]
+- 'UnresolvedTableOrView [default, alltypes], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#3701, data_type#3702, comment#3703]
== Optimized Logical Plan ==
CommandResult [col_name#3701, data_type#3702, comment#3703], Execute DescribeTableCommand, [[STRING,string,null], [DOUBLE,double,null], [INTEGER,int,null], [BIGINT,bigint,null], [FLOAT,float,null], [DECIMAL,decimal(10,2),null], [NUMBER,decimal(10,2),null], [BOOLEAN,boolean,null], [DATE,date,null], [TIMESTAMP,timestamp,null], [DATETIME,timestamp,null], [BINARY,binary,null], [ARRAY,array<int>,null], [MAP,map<string,string>,null], [STRUCT,struct<field1:string,field2:int>,null], [VARCHAR,string,null], [CHAR,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#3701, data_type#3702, comment#3703]
== Physical Plan ==
CommandResult [col_name#3701, data_type#3702, comment#3703]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#3701, data_type#3702, comment#3703]
|
jonathon
|
|
31082431-b5fd-4dcf-aa9f-4ac9f4f0b874
|
2025/06/15 06:48:31
|
2025/06/15 06:48:31
|
2025/06/15 06:48:31
|
45 ms
|
138 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathan
|
|
32526ce1-c7c7-4fb6-9e54-86de31256f92
|
2025/06/13 23:34:49
|
2025/06/13 23:34:49
|
2025/06/13 23:34:49
|
29 ms
|
337 ms
|
Listing databases 'catalog : , schemaPattern : null'
|
CLOSED
|
|
jonathon
|
|
339b74a3-7492-4035-b9f6-bfff4a4916de
|
2025/06/13 23:20:55
|
2025/06/13 23:20:55
|
2025/06/13 23:20:55
|
40 ms
|
184 ms
|
set -v
|
CLOSED
|
== Parsed Logical Plan ==
SetCommand (-v,None)
== Analyzed Logical Plan ==
key: string, value: string, meaning: string, Since version: string
SetCommand (-v,None)
== Optimized Logical Plan ==
CommandResult [key#3236, value#3237, meaning#3238, Since version#3239], Execute SetCommand, [[spark.sql.adaptive.advisoryPartitionSizeInBytes,<value of spark.sql.adaptive.shuffle.targetPostShuffleInputSize>,The advisory size in bytes of the shuffle partition during adaptive optimization (when spark.sql.adaptive.enabled is true). It takes effect when Spark coalesces small shuffle partitions or splits skewed shuffle partition.,3.0.0], [spark.sql.adaptive.autoBroadcastJoinThreshold,<undefined>,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. The default value is same with spark.sql.autoBroadcastJoinThreshold. Note that, this config is used only in adaptive framework.,3.2.0], [spark.sql.adaptive.coalescePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will coalesce contiguous shuffle partitions according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid too many small tasks.,3.0.0], [spark.sql.adaptive.coalescePartitions.initialPartitionNum,<undefined>,The initial number of shuffle partitions before coalescing. If not set, it equals to spark.sql.shuffle.partitions. This configuration only has an effect when 'spark.sql.adaptive.enabled' and 'spark.sql.adaptive.coalescePartitions.enabled' are both true.,3.0.0], [spark.sql.adaptive.coalescePartitions.minPartitionSize,1MB,The minimum size of shuffle partitions after coalescing. This is useful when the adaptively calculated target size is too small during partition coalescing.,3.2.0], [spark.sql.adaptive.coalescePartitions.parallelismFirst,true,When true, Spark does not respect the target size specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes' (default 64MB) when coalescing contiguous shuffle partitions, but adaptively calculate the target size according to the default parallelism of the Spark cluster. The calculated size is usually smaller than the configured target size. This is to maximize the parallelism and avoid performance regression when enabling adaptive query execution. It's recommended to set this config to false and respect the configured target size.,3.2.0], [spark.sql.adaptive.customCostEvaluatorClass,<undefined>,The custom cost evaluator class to be used for adaptive execution. If not being set, Spark will use its own SimpleCostEvaluator by default.,3.2.0], [spark.sql.adaptive.enabled,true,When true, enable adaptive query execution, which re-optimizes the query plan in the middle of query execution, based on accurate runtime statistics.,1.6.0], [spark.sql.adaptive.forceOptimizeSkewedJoin,false,When true, force enable OptimizeSkewedJoin even if it introduces extra shuffle.,3.3.0], [spark.sql.adaptive.localShuffleReader.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark tries to use local shuffle reader to read the shuffle data when the shuffle partitioning is not needed, for example, after converting sort-merge join to broadcast-hash join.,3.0.0], [spark.sql.adaptive.maxShuffledHashJoinLocalMapThreshold,0b,Configures the maximum size in bytes per partition that can be allowed to build local hash map. 
If this value is not smaller than spark.sql.adaptive.advisoryPartitionSizeInBytes and all the partition size are not larger than this config, join selection prefer to use shuffled hash join instead of sort merge join regardless of the value of spark.sql.join.preferSortMergeJoin.,3.2.0], [spark.sql.adaptive.optimizeSkewsInRebalancePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will optimize the skewed shuffle partitions in RebalancePartitions and split them to smaller ones according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid data skew.,3.2.0], [spark.sql.adaptive.optimizer.excludedRules,<undefined>,Configures a list of rules to be disabled in the adaptive optimizer, in which the rules are specified by their rule names and separated by comma. The optimizer will log the rules that have indeed been excluded.,3.1.0], [spark.sql.adaptive.rebalancePartitionsSmallPartitionFactor,0.2,A partition will be merged during splitting if its size is small than this factor multiply spark.sql.adaptive.advisoryPartitionSizeInBytes.,3.3.0], [spark.sql.adaptive.skewJoin.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark dynamically handles skew in shuffled join (sort-merge and shuffled hash) by splitting (and replicating if needed) skewed partitions.,3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionFactor,5.0,A partition is considered as skewed if its size is larger than this factor multiplying the median partition size and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes',3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes,256MB,A partition is considered as skewed if its size in bytes is larger than this threshold and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionFactor' multiplying the median partition size. Ideally this config should be set larger than 'spark.sql.adaptive.advisoryPartitionSizeInBytes'.,3.0.0], [spark.sql.allowNamedFunctionArguments,true,If true, Spark will turn on support for named parameters for all functions that has it implemented.,3.5.0], [spark.sql.ansi.doubleQuotedIdentifiers,false,When true and 'spark.sql.ansi.enabled' is true, Spark SQL reads literals enclosed in double quoted (") as identifiers. When false they are read as string literals.,3.4.0], [spark.sql.ansi.enabled,false,When true, Spark SQL uses an ANSI compliant dialect instead of being Hive compliant. For example, Spark will throw an exception at runtime instead of returning null results when the inputs to a SQL operator/function are invalid.For full details of this dialect, you can find them in the section "ANSI Compliance" of Spark's documentation. Some ANSI dialect features may be not from the ANSI SQL standard directly, but their behaviors align with ANSI SQL's style,3.0.0], [spark.sql.ansi.enforceReservedKeywords,false,When true and 'spark.sql.ansi.enabled' is true, the Spark SQL parser enforces the ANSI reserved keywords and forbids SQL queries that use reserved keywords as alias names and/or identifiers for table, view, function, etc.,3.3.0], [spark.sql.ansi.relationPrecedence,false,When true and 'spark.sql.ansi.enabled' is true, JOIN takes precedence over comma when combining relation. For example, `t1, t2 JOIN t3` should result to `t1 X (t2 X t3)`. 
If the config is false, the result is `(t1 X t2) X t3`.,3.4.0], [spark.sql.autoBroadcastJoinThreshold,10MB,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. Note that currently statistics are only supported for Hive Metastore tables where the command `ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan` has been run, and file-based data source tables where the statistics are computed directly on the files of data.,1.1.0], [spark.sql.avro.compression.codec,snappy,Compression codec used in writing of AVRO files. Supported codecs: uncompressed, deflate, snappy, bzip2, xz and zstandard. Default codec is snappy.,2.4.0], ... 183 more fields]
+- SetCommand (-v,None)
== Physical Plan ==
CommandResult [key#3236, value#3237, meaning#3238, Since version#3239]
+- Execute SetCommand
+- SetCommand (-v,None)
|
jonathan
|
|
35f04b2d-808f-4f23-b9f6-508233a04899
|
2025/06/13 22:54:10
|
2025/06/13 22:54:11
|
2025/06/13 22:54:11
|
50 ms
|
375 ms
|
SHOW TABLES IN `c3ba675f1fb64660ba4a90155b35924e`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#3021, tableName#3022, isTemporary#3023]
+- 'UnresolvedNamespace [c3ba675f1fb64660ba4a90155b35924e]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#3021, tableName#3022, isTemporary#3023]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [c3ba675f1fb64660ba4a90155b35924e]
== Optimized Logical Plan ==
CommandResult [namespace#3021, tableName#3022, isTemporary#3023], ShowTables [namespace#3021, tableName#3022, isTemporary#3023], V2SessionCatalog(spark_catalog), [c3ba675f1fb64660ba4a90155b35924e], [[0,2000000020,400000000c,0,6635373661623363,3036363436626631,3531303961346162,6534323935336235,69746e656469796d,72656966]]
+- ShowTables [namespace#3021, tableName#3022, isTemporary#3023]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [c3ba675f1fb64660ba4a90155b35924e]
== Physical Plan ==
CommandResult [namespace#3021, tableName#3022, isTemporary#3023]
+- ShowTables [namespace#3021, tableName#3022, isTemporary#3023], V2SessionCatalog(spark_catalog), [c3ba675f1fb64660ba4a90155b35924e]
|
jonathon
|
|
3688010e-d716-46d7-b78f-abecb7972de1
|
2025/06/15 06:45:38
|
2025/06/15 06:45:38
|
2025/06/15 06:45:38
|
45 ms
|
197 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathon
|
|
37820b88-e109-4800-b1c9-3a945289714b
|
2025/06/15 06:48:31
|
2025/06/15 06:48:31
|
2025/06/15 06:48:31
|
79 ms
|
175 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#5592, data_type#5593, comment#5594]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5592, data_type#5593, comment#5594]
== Optimized Logical Plan ==
CommandResult [col_name#5592, data_type#5593, comment#5594], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5592, data_type#5593, comment#5594]
== Physical Plan ==
CommandResult [col_name#5592, data_type#5593, comment#5594]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5592, data_type#5593, comment#5594]
|
jonathon
|
|
3ab9b884-54e8-4b6f-9c4b-ab186038af66
|
2025/06/14 01:23:34
|
2025/06/14 01:23:34
|
2025/06/14 01:23:34
|
53 ms
|
206 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathon
|
|
3ee84de5-2040-4fb0-80e7-a039974f2aae
|
2025/06/14 01:23:34
|
2025/06/14 01:23:34
|
2025/06/14 01:23:34
|
91 ms
|
248 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#4440, data_type#4441, comment#4442]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4440, data_type#4441, comment#4442]
== Optimized Logical Plan ==
CommandResult [col_name#4440, data_type#4441, comment#4442], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4440, data_type#4441, comment#4442]
== Physical Plan ==
CommandResult [col_name#4440, data_type#4441, comment#4442]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4440, data_type#4441, comment#4442]
|
jonathon
|
|
3f361a2e-254f-4c9f-b8c0-14c65350c9b4
|
2025/06/13 22:18:17
|
2025/06/13 22:18:17
|
2025/06/13 22:18:18
|
221 ms
|
319 ms
|
Listing tables 'catalog : null, schemaPattern : %, tableTypes : null, tableName : %'
|
CLOSED
|
|
jonathon
|
|
4149da57-09ce-4a6d-9599-bc8e97834262
|
2025/06/15 06:48:31
|
2025/06/15 06:48:31
|
2025/06/15 06:48:31
|
79 ms
|
175 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#5615, data_type#5616, comment#5617]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5615, data_type#5616, comment#5617]
== Optimized Logical Plan ==
CommandResult [col_name#5615, data_type#5616, comment#5617], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5615, data_type#5616, comment#5617]
== Physical Plan ==
CommandResult [col_name#5615, data_type#5616, comment#5617]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5615, data_type#5616, comment#5617]
|
jonathon
|
|
4192f023-f7e5-410d-a4aa-e154595bddb8
|
2025/06/13 22:18:18
|
2025/06/13 22:18:18
|
2025/06/13 22:18:18
|
90 ms
|
196 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#2477, data_type#2478, comment#2479]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2477, data_type#2478, comment#2479]
== Optimized Logical Plan ==
CommandResult [col_name#2477, data_type#2478, comment#2479], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2477, data_type#2478, comment#2479]
== Physical Plan ==
CommandResult [col_name#2477, data_type#2478, comment#2479]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2477, data_type#2478, comment#2479]
|
jonathan
|
|
420aad98-a026-4b03-91ed-a2e3e50b509a
|
2025/06/13 23:27:03
|
2025/06/13 23:27:03
|
2025/06/13 23:27:04
|
11 ms
|
346 ms
|
SHOW TABLES IN `global_temp`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#3798, tableName#3799, isTemporary#3800]
+- 'UnresolvedNamespace [global_temp]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#3798, tableName#3799, isTemporary#3800]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [global_temp]
== Optimized Logical Plan ==
CommandResult [namespace#3798, tableName#3799, isTemporary#3800], ShowTables [namespace#3798, tableName#3799, isTemporary#3800], V2SessionCatalog(spark_catalog), [global_temp]
+- ShowTables [namespace#3798, tableName#3799, isTemporary#3800]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [global_temp]
== Physical Plan ==
CommandResult <empty>, [namespace#3798, tableName#3799, isTemporary#3800]
+- ShowTables [namespace#3798, tableName#3799, isTemporary#3800], V2SessionCatalog(spark_catalog), [global_temp]
|
jonathan
|
|
42443fae-e69b-4568-86b8-a04c28fdddd2
|
2025/06/13 22:39:06
|
2025/06/13 22:39:06
|
2025/06/13 22:39:06
|
48 ms
|
325 ms
|
SHOW TABLES IN `default`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#2737, tableName#2738, isTemporary#2739]
+- 'UnresolvedNamespace [default]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#2737, tableName#2738, isTemporary#2739]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [default]
== Optimized Logical Plan ==
CommandResult [namespace#2737, tableName#2738, isTemporary#2739], ShowTables [namespace#2737, tableName#2738, isTemporary#2739], V2SessionCatalog(spark_catalog), [default], [[0,2000000007,2800000008,0,746c7561666564,7374726f70726961], [0,2000000007,2800000008,0,746c7561666564,73657079746c6c61], [0,2000000007,2800000009,0,746c7561666564,73657079746c6c61,32], [0,2000000007,280000000d,0,746c7561666564,73657079746c6c61,6369736162], [0,2000000007,280000000e,0,746c7561666564,73657079746c6c61,326369736162], [0,2000000007,2800000009,0,746c7561666564,7079747961727261,65], [0,2000000007,280000000a,0,746c7561666564,7974746e69676962,6570], [0,2000000007,280000000a,0,746c7561666564,79747972616e6962,6570], [0,2000000007,2800000008,0,746c7561666564,6570797465746164], [0,2000000007,280000000b,0,746c7561666564,746c616d69636564,657079], [0,2000000007,2800000009,0,746c7561666564,70797474616f6c66,65], [0,2000000007,2800000008,0,746c7561666564,736570797470616d], [0,2000000007,280000000b,0,746c7561666564,646978617463796e,617461], [0,2000000007,280000000f,0,746c7561666564,746978617463796e,61746164706972], [0,2000000007,2800000010,0,746c7561666564,7365745f656d6f73,32656c6261745f74], [0,2000000007,280000000a,0,746c7561666564,7974746375727473,6570], [0,2000000007,280000000e,0,746c7561666564,656e6f7a69786174,70756b6f6f6c], [0,2000000007,280000000c,0,746c7561666564,74676e696b726f77,73657079], [0,2000000007,2800000016,0,746c7561666564,74676e696b726f77,6874697773657079,7265626d756e]]
+- ShowTables [namespace#2737, tableName#2738, isTemporary#2739]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [default]
== Physical Plan ==
CommandResult [namespace#2737, tableName#2738, isTemporary#2739]
+- ShowTables [namespace#2737, tableName#2738, isTemporary#2739], V2SessionCatalog(spark_catalog), [default]
|
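The row dump attached to the optimized ShowTables plan above looks like Spark's internal UnsafeRow encoding (an assumption; the layout is not documented in this log): after the fixed header words, each comma-separated hex token reads as an 8-byte little-endian chunk of a UTF-8 string, so 746c7561666564 reverses to "default" and 7374726f70726961 to "airports". A small decoder sketch under that assumption, treating this as reverse-engineering rather than an API:

// Decode one string field from its hex word dump, e.g.
// "746c7561666564" -> "default", "73657079746c6c61,32" -> "alltypes2".
def decodeWords(dump: String): String =
  dump.split(",").map { word =>
    word.grouped(2)    // hex byte pairs, most significant byte first
      .toSeq.reverse   // little-endian words: reverse into text order
      .map(h => Integer.parseInt(h, 16).toChar)
      .mkString
  }.mkString

println(decodeWords("7374726f70726961"))                 // airports
println(decodeWords("746978617463796e,61746164706972"))  // nyctaxitripdata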
jonathon
|
|
4287216d-9cf3-48d4-8586-f3259d75a4db
|
2025/06/13 07:16:51
|
2025/06/13 07:16:51
|
2025/06/13 07:16:51
|
90 ms
|
189 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#2058, data_type#2059, comment#2060]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2058, data_type#2059, comment#2060]
== Optimized Logical Plan ==
CommandResult [col_name#2058, data_type#2059, comment#2060], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2058, data_type#2059, comment#2060]
== Physical Plan ==
CommandResult [col_name#2058, data_type#2059, comment#2060]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2058, data_type#2059, comment#2060]
|
jonathon
|
|
43a87bef-72a9-4d8b-94b3-ae38d5701de3
|
2025/06/13 22:37:47
|
2025/06/13 22:37:47
|
2025/06/13 22:37:47
|
45 ms
|
182 ms
|
set -v
|
CLOSED
|
== Parsed Logical Plan ==
SetCommand (-v,None)
== Analyzed Logical Plan ==
key: string, value: string, meaning: string, Since version: string
SetCommand (-v,None)
== Optimized Logical Plan ==
CommandResult [key#2573, value#2574, meaning#2575, Since version#2576], Execute SetCommand, [[spark.sql.adaptive.advisoryPartitionSizeInBytes,<value of spark.sql.adaptive.shuffle.targetPostShuffleInputSize>,The advisory size in bytes of the shuffle partition during adaptive optimization (when spark.sql.adaptive.enabled is true). It takes effect when Spark coalesces small shuffle partitions or splits skewed shuffle partition.,3.0.0], [spark.sql.adaptive.autoBroadcastJoinThreshold,<undefined>,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. The default value is same with spark.sql.autoBroadcastJoinThreshold. Note that, this config is used only in adaptive framework.,3.2.0], [spark.sql.adaptive.coalescePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will coalesce contiguous shuffle partitions according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid too many small tasks.,3.0.0], [spark.sql.adaptive.coalescePartitions.initialPartitionNum,<undefined>,The initial number of shuffle partitions before coalescing. If not set, it equals to spark.sql.shuffle.partitions. This configuration only has an effect when 'spark.sql.adaptive.enabled' and 'spark.sql.adaptive.coalescePartitions.enabled' are both true.,3.0.0], [spark.sql.adaptive.coalescePartitions.minPartitionSize,1MB,The minimum size of shuffle partitions after coalescing. This is useful when the adaptively calculated target size is too small during partition coalescing.,3.2.0], [spark.sql.adaptive.coalescePartitions.parallelismFirst,true,When true, Spark does not respect the target size specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes' (default 64MB) when coalescing contiguous shuffle partitions, but adaptively calculate the target size according to the default parallelism of the Spark cluster. The calculated size is usually smaller than the configured target size. This is to maximize the parallelism and avoid performance regression when enabling adaptive query execution. It's recommended to set this config to false and respect the configured target size.,3.2.0], [spark.sql.adaptive.customCostEvaluatorClass,<undefined>,The custom cost evaluator class to be used for adaptive execution. If not being set, Spark will use its own SimpleCostEvaluator by default.,3.2.0], [spark.sql.adaptive.enabled,true,When true, enable adaptive query execution, which re-optimizes the query plan in the middle of query execution, based on accurate runtime statistics.,1.6.0], [spark.sql.adaptive.forceOptimizeSkewedJoin,false,When true, force enable OptimizeSkewedJoin even if it introduces extra shuffle.,3.3.0], [spark.sql.adaptive.localShuffleReader.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark tries to use local shuffle reader to read the shuffle data when the shuffle partitioning is not needed, for example, after converting sort-merge join to broadcast-hash join.,3.0.0], [spark.sql.adaptive.maxShuffledHashJoinLocalMapThreshold,0b,Configures the maximum size in bytes per partition that can be allowed to build local hash map. 
If this value is not smaller than spark.sql.adaptive.advisoryPartitionSizeInBytes and all the partition size are not larger than this config, join selection prefer to use shuffled hash join instead of sort merge join regardless of the value of spark.sql.join.preferSortMergeJoin.,3.2.0], [spark.sql.adaptive.optimizeSkewsInRebalancePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will optimize the skewed shuffle partitions in RebalancePartitions and split them to smaller ones according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid data skew.,3.2.0], [spark.sql.adaptive.optimizer.excludedRules,<undefined>,Configures a list of rules to be disabled in the adaptive optimizer, in which the rules are specified by their rule names and separated by comma. The optimizer will log the rules that have indeed been excluded.,3.1.0], [spark.sql.adaptive.rebalancePartitionsSmallPartitionFactor,0.2,A partition will be merged during splitting if its size is small than this factor multiply spark.sql.adaptive.advisoryPartitionSizeInBytes.,3.3.0], [spark.sql.adaptive.skewJoin.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark dynamically handles skew in shuffled join (sort-merge and shuffled hash) by splitting (and replicating if needed) skewed partitions.,3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionFactor,5.0,A partition is considered as skewed if its size is larger than this factor multiplying the median partition size and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes',3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes,256MB,A partition is considered as skewed if its size in bytes is larger than this threshold and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionFactor' multiplying the median partition size. Ideally this config should be set larger than 'spark.sql.adaptive.advisoryPartitionSizeInBytes'.,3.0.0], [spark.sql.allowNamedFunctionArguments,true,If true, Spark will turn on support for named parameters for all functions that has it implemented.,3.5.0], [spark.sql.ansi.doubleQuotedIdentifiers,false,When true and 'spark.sql.ansi.enabled' is true, Spark SQL reads literals enclosed in double quoted (") as identifiers. When false they are read as string literals.,3.4.0], [spark.sql.ansi.enabled,false,When true, Spark SQL uses an ANSI compliant dialect instead of being Hive compliant. For example, Spark will throw an exception at runtime instead of returning null results when the inputs to a SQL operator/function are invalid.For full details of this dialect, you can find them in the section "ANSI Compliance" of Spark's documentation. Some ANSI dialect features may be not from the ANSI SQL standard directly, but their behaviors align with ANSI SQL's style,3.0.0], [spark.sql.ansi.enforceReservedKeywords,false,When true and 'spark.sql.ansi.enabled' is true, the Spark SQL parser enforces the ANSI reserved keywords and forbids SQL queries that use reserved keywords as alias names and/or identifiers for table, view, function, etc.,3.3.0], [spark.sql.ansi.relationPrecedence,false,When true and 'spark.sql.ansi.enabled' is true, JOIN takes precedence over comma when combining relation. For example, `t1, t2 JOIN t3` should result to `t1 X (t2 X t3)`. 
If the config is false, the result is `(t1 X t2) X t3`.,3.4.0], [spark.sql.autoBroadcastJoinThreshold,10MB,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. Note that currently statistics are only supported for Hive Metastore tables where the command `ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan` has been run, and file-based data source tables where the statistics are computed directly on the files of data.,1.1.0], [spark.sql.avro.compression.codec,snappy,Compression codec used in writing of AVRO files. Supported codecs: uncompressed, deflate, snappy, bzip2, xz and zstandard. Default codec is snappy.,2.4.0], ... 183 more fields]
+- SetCommand (-v,None)
== Physical Plan ==
CommandResult [key#2573, value#2574, meaning#2575, Since version#2576]
+- Execute SetCommand
+- SetCommand (-v,None)
|
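set -v is the SQL-level dump of every documented spark.sql.* configuration together with its current value, description, and introduction version, which is why the analyzed schema above is key, value, meaning, "Since version". The same listing is available from a session; a minimal sketch, assuming an existing SparkSession named spark:

// Equivalent of the logged `set -v`: one row per documented SQL config.
spark.sql("SET -v").show(5, truncate = false)

// A single setting can also be read through the runtime conf API;
// this log shows adaptive query execution enabled.
println(spark.conf.get("spark.sql.adaptive.enabled"))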
jonathon
|
|
43bedd5c-dc31-4efa-8841-388b737ecadb
|
2025/06/14 01:23:33
|
2025/06/14 01:23:34
|
2025/06/14 01:23:34
|
200 ms
|
352 ms
|
Listing tables 'catalog : null, schemaPattern : %, tableTypes : null, tableName : %'
|
CLOSED
|
|
jonathan
|
|
44261df9-dd24-46c0-8629-c0e5c44b8ebf
|
2025/06/13 22:54:09
|
2025/06/13 22:54:09
|
2025/06/13 22:54:10
|
1 ms
|
753 ms
|
Listing catalogs
|
CLOSED
|
|
jonathon
|
|
44528be3-10bd-467f-b2eb-e296c381fef1
|
2025/06/13 23:38:37
|
2025/06/13 23:38:37
|
2025/06/13 23:38:37
|
23 ms
|
121 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathan
|
|
44f1afa7-c4b6-4189-bee1-d284065506de
|
2025/06/13 23:24:48
|
2025/06/13 23:24:48
|
2025/06/13 23:24:48
|
84 ms
|
366 ms
|
DESCRIBE TABLE `default`.`alltypes`
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#3620, data_type#3621, comment#3622]
+- 'UnresolvedTableOrView [default, alltypes], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#3620, data_type#3621, comment#3622]
== Optimized Logical Plan ==
CommandResult [col_name#3620, data_type#3621, comment#3622], Execute DescribeTableCommand, [[STRING,string,null], [DOUBLE,double,null], [INTEGER,int,null], [BIGINT,bigint,null], [FLOAT,float,null], [DECIMAL,decimal(10,2),null], [NUMBER,decimal(10,2),null], [BOOLEAN,boolean,null], [DATE,date,null], [TIMESTAMP,timestamp,null], [DATETIME,timestamp,null], [BINARY,binary,null], [ARRAY,array<int>,null], [MAP,map<string,string>,null], [STRUCT,struct<field1:string,field2:int>,null], [VARCHAR,string,null], [CHAR,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#3620, data_type#3621, comment#3622]
== Physical Plan ==
CommandResult [col_name#3620, data_type#3621, comment#3622]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#3620, data_type#3621, comment#3622]
|
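The alltypes output doubles as a map of how declared SQL types surface in Spark: DECIMAL and NUMBER both come back as decimal(10,2), DATETIME as timestamp, and VARCHAR/CHAR as plain string. One DDL that would reproduce this surfaced schema is sketched below; it is an illustration, not the table's actual DDL, since DESCRIBE has already erased any varchar/char lengths and the original type spellings (the table name is hypothetical):

spark.sql("""
  CREATE TABLE IF NOT EXISTS default.alltypes_sketch (
    `STRING`    string,
    `DOUBLE`    double,
    `INTEGER`   int,
    `BIGINT`    bigint,
    `FLOAT`     float,
    `DECIMAL`   decimal(10,2),
    `NUMBER`    decimal(10,2),  -- NUMBER is not a Spark type; it surfaced as decimal(10,2)
    `BOOLEAN`   boolean,
    `DATE`      date,
    `TIMESTAMP` timestamp,
    `DATETIME`  timestamp,      -- likewise surfaced as timestamp
    `BINARY`    binary,
    `ARRAY`     array<int>,
    `MAP`       map<string,string>,
    `STRUCT`    struct<field1:string,field2:int>,
    `VARCHAR`   string,         -- original varchar length not recoverable here
    `CHAR`      string
  )
""")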
jonathon
|
|
47c896dd-4ddd-47a9-a61a-2ff468982f35
|
2025/06/14 01:47:47
|
2025/06/14 01:47:47
|
2025/06/14 01:47:47
|
91 ms
|
189 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#4728, data_type#4729, comment#4730]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4728, data_type#4729, comment#4730]
== Optimized Logical Plan ==
CommandResult [col_name#4728, data_type#4729, comment#4730], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4728, data_type#4729, comment#4730]
== Physical Plan ==
CommandResult [col_name#4728, data_type#4729, comment#4730]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4728, data_type#4729, comment#4730]
|
jonathon
|
|
47ef6081-63ac-45db-9286-1fb48f5f4221
|
2025/06/13 22:44:55
|
2025/06/13 22:44:55
|
2025/06/13 22:44:55
|
100 ms
|
200 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#2822, data_type#2823, comment#2824]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2822, data_type#2823, comment#2824]
== Optimized Logical Plan ==
CommandResult [col_name#2822, data_type#2823, comment#2824], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2822, data_type#2823, comment#2824]
== Physical Plan ==
CommandResult [col_name#2822, data_type#2823, comment#2824]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#2822, data_type#2823, comment#2824]
|
jonathan
|
|
48c34221-b487-4bf6-8bb8-75a3009aa2e7
|
2025/06/13 23:35:49
|
2025/06/13 23:35:50
|
2025/06/13 23:35:50
|
208 ms
|
280 ms
|
Listing tables 'catalog : null, schemaPattern : %, tableTypes : null, tableName : %'
|
CLOSED
|
|
jonathan
|
|
49ebf4e0-34db-4e16-8552-40f05c9b3765
|
2025/06/13 22:51:53
|
2025/06/13 22:51:53
|
2025/06/13 22:51:53
|
20 ms
|
338 ms
|
SHOW TABLES IN `test`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#3001, tableName#3002, isTemporary#3003]
+- 'UnresolvedNamespace [test]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#3001, tableName#3002, isTemporary#3003]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [test]
== Optimized Logical Plan ==
CommandResult [namespace#3001, tableName#3002, isTemporary#3003], ShowTables [namespace#3001, tableName#3002, isTemporary#3003], V2SessionCatalog(spark_catalog), [test]
+- ShowTables [namespace#3001, tableName#3002, isTemporary#3003]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [test]
== Physical Plan ==
CommandResult <empty>, [namespace#3001, tableName#3002, isTemporary#3003]
+- ShowTables [namespace#3001, tableName#3002, isTemporary#3003], V2SessionCatalog(spark_catalog), [test]
|
jonathon
|
|
4c448d36-25b1-4d61-b0d9-616170b9ed52
|
2025/06/13 22:18:18
|
2025/06/13 22:18:18
|
2025/06/13 22:18:18
|
55 ms
|
150 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathan
|
|
4f0e028f-5895-4e97-9391-b34a1403a426
|
2025/06/13 23:13:23
|
2025/06/13 23:13:23
|
2025/06/13 23:13:24
|
65 ms
|
329 ms
|
DESCRIBE TABLE `default`.`AllTypes`
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#3155, data_type#3156, comment#3157]
+- 'UnresolvedTableOrView [default, AllTypes], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#3155, data_type#3156, comment#3157]
== Optimized Logical Plan ==
CommandResult [col_name#3155, data_type#3156, comment#3157], Execute DescribeTableCommand, [[STRING,string,null], [DOUBLE,double,null], [INTEGER,int,null], [BIGINT,bigint,null], [FLOAT,float,null], [DECIMAL,decimal(10,2),null], [NUMBER,decimal(10,2),null], [BOOLEAN,boolean,null], [DATE,date,null], [TIMESTAMP,timestamp,null], [DATETIME,timestamp,null], [BINARY,binary,null], [ARRAY,array<int>,null], [MAP,map<string,string>,null], [STRUCT,struct<field1:string,field2:int>,null], [VARCHAR,string,null], [CHAR,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#3155, data_type#3156, comment#3157]
== Physical Plan ==
CommandResult [col_name#3155, data_type#3156, comment#3157]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#3155, data_type#3156, comment#3157]
|
jonathon
|
|
4faa626d-28d0-4b94-9f95-6e919c2058bb
|
2025/06/15 06:43:48
|
2025/06/15 06:43:48
|
2025/06/15 06:43:48
|
87 ms
|
185 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#5304, data_type#5305, comment#5306]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5304, data_type#5305, comment#5306]
== Optimized Logical Plan ==
CommandResult [col_name#5304, data_type#5305, comment#5306], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5304, data_type#5305, comment#5306]
== Physical Plan ==
CommandResult [col_name#5304, data_type#5305, comment#5306]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5304, data_type#5305, comment#5306]
|
jonathan
|
|
500a6e02-2c16-4a5f-aa79-c12bf659d6f8
|
2025/06/13 23:21:10
|
2025/06/13 23:21:10
|
2025/06/13 23:21:10
|
38 ms
|
387 ms
|
Listing databases 'catalog : , schemaPattern : null'
|
CLOSED
|
|
jonathan
|
|
51708b4c-da4e-4cdc-96e6-ebf36f4d8c54
|
2025/06/13 23:21:32
|
2025/06/13 23:21:33
|
2025/06/13 23:21:33
|
67 ms
|
346 ms
|
DESCRIBE TABLE `default`.`alltypes`
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#3487, data_type#3488, comment#3489]
+- 'UnresolvedTableOrView [default, alltypes], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#3487, data_type#3488, comment#3489]
== Optimized Logical Plan ==
CommandResult [col_name#3487, data_type#3488, comment#3489], Execute DescribeTableCommand, [[STRING,string,null], [DOUBLE,double,null], [INTEGER,int,null], [BIGINT,bigint,null], [FLOAT,float,null], [DECIMAL,decimal(10,2),null], [NUMBER,decimal(10,2),null], [BOOLEAN,boolean,null], [DATE,date,null], [TIMESTAMP,timestamp,null], [DATETIME,timestamp,null], [BINARY,binary,null], [ARRAY,array<int>,null], [MAP,map<string,string>,null], [STRUCT,struct<field1:string,field2:int>,null], [VARCHAR,string,null], [CHAR,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#3487, data_type#3488, comment#3489]
== Physical Plan ==
CommandResult [col_name#3487, data_type#3488, comment#3489]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#3487, data_type#3488, comment#3489]
|
jonathan
|
|
52fc6c77-7c71-4a15-bcf7-b3b231790177
|
2025/06/13 23:35:50
|
2025/06/13 23:35:50
|
2025/06/13 23:35:50
|
37 ms
|
119 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : alltypes, columnName : null'
|
CLOSED
|
|
jonathan
|
|
53514d4b-1396-435a-949f-58e0bbcf0f24
|
2025/06/13 23:35:49
|
2025/06/13 23:35:49
|
2025/06/13 23:35:49
|
44 ms
|
156 ms
|
set -v
|
CLOSED
|
== Parsed Logical Plan ==
SetCommand (-v,None)
== Analyzed Logical Plan ==
key: string, value: string, meaning: string, Since version: string
SetCommand (-v,None)
== Optimized Logical Plan ==
CommandResult [key#4138, value#4139, meaning#4140, Since version#4141], Execute SetCommand, [[spark.sql.adaptive.advisoryPartitionSizeInBytes,<value of spark.sql.adaptive.shuffle.targetPostShuffleInputSize>,The advisory size in bytes of the shuffle partition during adaptive optimization (when spark.sql.adaptive.enabled is true). It takes effect when Spark coalesces small shuffle partitions or splits skewed shuffle partition.,3.0.0], [spark.sql.adaptive.autoBroadcastJoinThreshold,<undefined>,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. The default value is same with spark.sql.autoBroadcastJoinThreshold. Note that, this config is used only in adaptive framework.,3.2.0], [spark.sql.adaptive.coalescePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will coalesce contiguous shuffle partitions according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid too many small tasks.,3.0.0], [spark.sql.adaptive.coalescePartitions.initialPartitionNum,<undefined>,The initial number of shuffle partitions before coalescing. If not set, it equals to spark.sql.shuffle.partitions. This configuration only has an effect when 'spark.sql.adaptive.enabled' and 'spark.sql.adaptive.coalescePartitions.enabled' are both true.,3.0.0], [spark.sql.adaptive.coalescePartitions.minPartitionSize,1MB,The minimum size of shuffle partitions after coalescing. This is useful when the adaptively calculated target size is too small during partition coalescing.,3.2.0], [spark.sql.adaptive.coalescePartitions.parallelismFirst,true,When true, Spark does not respect the target size specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes' (default 64MB) when coalescing contiguous shuffle partitions, but adaptively calculate the target size according to the default parallelism of the Spark cluster. The calculated size is usually smaller than the configured target size. This is to maximize the parallelism and avoid performance regression when enabling adaptive query execution. It's recommended to set this config to false and respect the configured target size.,3.2.0], [spark.sql.adaptive.customCostEvaluatorClass,<undefined>,The custom cost evaluator class to be used for adaptive execution. If not being set, Spark will use its own SimpleCostEvaluator by default.,3.2.0], [spark.sql.adaptive.enabled,true,When true, enable adaptive query execution, which re-optimizes the query plan in the middle of query execution, based on accurate runtime statistics.,1.6.0], [spark.sql.adaptive.forceOptimizeSkewedJoin,false,When true, force enable OptimizeSkewedJoin even if it introduces extra shuffle.,3.3.0], [spark.sql.adaptive.localShuffleReader.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark tries to use local shuffle reader to read the shuffle data when the shuffle partitioning is not needed, for example, after converting sort-merge join to broadcast-hash join.,3.0.0], [spark.sql.adaptive.maxShuffledHashJoinLocalMapThreshold,0b,Configures the maximum size in bytes per partition that can be allowed to build local hash map. 
If this value is not smaller than spark.sql.adaptive.advisoryPartitionSizeInBytes and all the partition size are not larger than this config, join selection prefer to use shuffled hash join instead of sort merge join regardless of the value of spark.sql.join.preferSortMergeJoin.,3.2.0], [spark.sql.adaptive.optimizeSkewsInRebalancePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will optimize the skewed shuffle partitions in RebalancePartitions and split them to smaller ones according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid data skew.,3.2.0], [spark.sql.adaptive.optimizer.excludedRules,<undefined>,Configures a list of rules to be disabled in the adaptive optimizer, in which the rules are specified by their rule names and separated by comma. The optimizer will log the rules that have indeed been excluded.,3.1.0], [spark.sql.adaptive.rebalancePartitionsSmallPartitionFactor,0.2,A partition will be merged during splitting if its size is small than this factor multiply spark.sql.adaptive.advisoryPartitionSizeInBytes.,3.3.0], [spark.sql.adaptive.skewJoin.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark dynamically handles skew in shuffled join (sort-merge and shuffled hash) by splitting (and replicating if needed) skewed partitions.,3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionFactor,5.0,A partition is considered as skewed if its size is larger than this factor multiplying the median partition size and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes',3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes,256MB,A partition is considered as skewed if its size in bytes is larger than this threshold and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionFactor' multiplying the median partition size. Ideally this config should be set larger than 'spark.sql.adaptive.advisoryPartitionSizeInBytes'.,3.0.0], [spark.sql.allowNamedFunctionArguments,true,If true, Spark will turn on support for named parameters for all functions that has it implemented.,3.5.0], [spark.sql.ansi.doubleQuotedIdentifiers,false,When true and 'spark.sql.ansi.enabled' is true, Spark SQL reads literals enclosed in double quoted (") as identifiers. When false they are read as string literals.,3.4.0], [spark.sql.ansi.enabled,false,When true, Spark SQL uses an ANSI compliant dialect instead of being Hive compliant. For example, Spark will throw an exception at runtime instead of returning null results when the inputs to a SQL operator/function are invalid.For full details of this dialect, you can find them in the section "ANSI Compliance" of Spark's documentation. Some ANSI dialect features may be not from the ANSI SQL standard directly, but their behaviors align with ANSI SQL's style,3.0.0], [spark.sql.ansi.enforceReservedKeywords,false,When true and 'spark.sql.ansi.enabled' is true, the Spark SQL parser enforces the ANSI reserved keywords and forbids SQL queries that use reserved keywords as alias names and/or identifiers for table, view, function, etc.,3.3.0], [spark.sql.ansi.relationPrecedence,false,When true and 'spark.sql.ansi.enabled' is true, JOIN takes precedence over comma when combining relation. For example, `t1, t2 JOIN t3` should result to `t1 X (t2 X t3)`. 
If the config is false, the result is `(t1 X t2) X t3`.,3.4.0], [spark.sql.autoBroadcastJoinThreshold,10MB,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. Note that currently statistics are only supported for Hive Metastore tables where the command `ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan` has been run, and file-based data source tables where the statistics are computed directly on the files of data.,1.1.0], [spark.sql.avro.compression.codec,snappy,Compression codec used in writing of AVRO files. Supported codecs: uncompressed, deflate, snappy, bzip2, xz and zstandard. Default codec is snappy.,2.4.0], ... 183 more fields]
+- SetCommand (-v,None)
== Physical Plan ==
CommandResult [key#4138, value#4139, meaning#4140, Since version#4141]
+- Execute SetCommand
+- SetCommand (-v,None)
|
jonathon
|
|
5418b015-6e13-442e-b46b-17892c1656ee
|
2025/06/15 06:45:38
|
2025/06/15 06:45:38
|
2025/06/15 06:45:38
|
193 ms
|
345 ms
|
Listing tables 'catalog : null, schemaPattern : %, tableTypes : null, tableName : %'
|
CLOSED
|
|
jonathon
|
|
557d3a44-cbef-455b-99fc-f26ee1546387
|
2025/06/13 22:37:47
|
2025/06/13 22:37:47
|
2025/06/13 22:37:47
|
24 ms
|
116 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathon
|
[50]
|
5a03ce3f-80ca-4a28-bbcf-cf5d41428bbe
|
2025/06/15 06:45:39
|
2025/06/15 06:45:39
|
2025/06/15 06:45:39
|
175 ms
|
330 ms
|
SELECT C_43 AS C_12, C_2 AS C_13, C_4 AS C_14, C_4331 AS C_17, C_4332 AS C_16, C_4333 AS C_18, C_8 AS C_19, C_5 AS C_15, C_9 AS C_20, C_6 AS C_24, C_7 AS C_21, C_11 AS C_23, C_10 AS C_22, C_0 AS C_25 FROM (SELECT C_64656661756c745f616972706f727473.`id` AS C_43, C_64656661756c745f616972706f727473.`type` AS C_2, C_64656661756c745f616972706f727473.`name` AS C_4, C_64656661756c745f616972706f727473.`lat` AS C_1, C_64656661756c745f616972706f727473.`lon` AS C_3, C_64656661756c745f616972706f727473.`elev` AS C_0, C_64656661756c745f616972706f727473.`continent` AS C_8, C_64656661756c745f616972706f727473.`country` AS C_5, C_64656661756c745f616972706f727473.`region` AS C_9, C_64656661756c745f616972706f727473.`city` AS C_6, C_64656661756c745f616972706f727473.`iata` AS C_7, C_64656661756c745f616972706f727473.`code` AS C_11, C_64656661756c745f616972706f727473.`gps` AS C_10, (round((C_64656661756c745f616972706f727473.`lat` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4331, (round((C_64656661756c745f616972706f727473.`lon` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4332, (round((C_64656661756c745f616972706f727473.`elev` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4333 FROM `default`.`airports` C_64656661756c745f616972706f727473 WHERE ((C_64656661756c745f616972706f727473.`lon` <= (- 1.040500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lon` >= (- 1.110500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lat` >= 4.100000000000000E+001) AND (C_64656661756c745f616972706f727473.`lat` <= 4.500000000000000E+001)) ) C_4954424c ORDER BY C_25 DESC LIMIT 5
|
CLOSED
|
== Parsed Logical Plan ==
'GlobalLimit 5
+- 'LocalLimit 5
+- 'Sort ['C_25 DESC NULLS LAST], true
+- 'Project ['C_43 AS C_12#5510, 'C_2 AS C_13#5511, 'C_4 AS C_14#5512, 'C_4331 AS C_17#5513, 'C_4332 AS C_16#5514, 'C_4333 AS C_18#5515, 'C_8 AS C_19#5516, 'C_5 AS C_15#5517, 'C_9 AS C_20#5518, 'C_6 AS C_24#5519, 'C_7 AS C_21#5520, 'C_11 AS C_23#5521, 'C_10 AS C_22#5522, 'C_0 AS C_25#5523]
+- 'SubqueryAlias C_4954424c
+- 'Project ['C_64656661756c745f616972706f727473.id AS C_43#5494, 'C_64656661756c745f616972706f727473.type AS C_2#5495, 'C_64656661756c745f616972706f727473.name AS C_4#5496, 'C_64656661756c745f616972706f727473.lat AS C_1#5497, 'C_64656661756c745f616972706f727473.lon AS C_3#5498, 'C_64656661756c745f616972706f727473.elev AS C_0#5499, 'C_64656661756c745f616972706f727473.continent AS C_8#5500, 'C_64656661756c745f616972706f727473.country AS C_5#5501, 'C_64656661756c745f616972706f727473.region AS C_9#5502, 'C_64656661756c745f616972706f727473.city AS C_6#5503, 'C_64656661756c745f616972706f727473.iata AS C_7#5504, 'C_64656661756c745f616972706f727473.code AS C_11#5505, 'C_64656661756c745f616972706f727473.gps AS C_10#5506, ('round(('C_64656661756c745f616972706f727473.lat * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4331#5507, ('round(('C_64656661756c745f616972706f727473.lon * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4332#5508, ('round(('C_64656661756c745f616972706f727473.elev * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4333#5509]
+- 'Filter ((('C_64656661756c745f616972706f727473.lon <= -104.05) AND ('C_64656661756c745f616972706f727473.lon >= -111.05)) AND (('C_64656661756c745f616972706f727473.lat >= 41.0) AND ('C_64656661756c745f616972706f727473.lat <= 45.0)))
+- 'SubqueryAlias C_64656661756c745f616972706f727473
+- 'UnresolvedRelation [default, airports], [], false
== Analyzed Logical Plan ==
C_12: string, C_13: string, C_14: string, C_17: double, C_16: double, C_18: double, C_19: string, C_15: string, C_20: string, C_24: string, C_21: string, C_23: string, C_22: string, C_25: double
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_25#5523 DESC NULLS LAST], true
+- Project [C_43#5494 AS C_12#5510, C_2#5495 AS C_13#5511, C_4#5496 AS C_14#5512, C_4331#5507 AS C_17#5513, C_4332#5508 AS C_16#5514, C_4333#5509 AS C_18#5515, C_8#5500 AS C_19#5516, C_5#5501 AS C_15#5517, C_9#5502 AS C_20#5518, C_6#5503 AS C_24#5519, C_7#5504 AS C_21#5520, C_11#5505 AS C_23#5521, C_10#5506 AS C_22#5522, C_0#5499 AS C_25#5523]
+- SubqueryAlias C_4954424c
+- Project [id#5524 AS C_43#5494, type#5525 AS C_2#5495, name#5526 AS C_4#5496, lat#5527 AS C_1#5497, lon#5528 AS C_3#5498, elev#5529 AS C_0#5499, continent#5530 AS C_8#5500, country#5531 AS C_5#5501, region#5532 AS C_9#5502, city#5533 AS C_6#5503, iata#5534 AS C_7#5504, code#5535 AS C_11#5505, gps#5536 AS C_10#5506, (round((lat#5527 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4331#5507, (round((lon#5528 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4332#5508, (round((elev#5529 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4333#5509]
+- Filter (((lon#5528 <= -104.05) AND (lon#5528 >= -111.05)) AND ((lat#5527 >= 41.0) AND (lat#5527 <= 45.0)))
+- SubqueryAlias C_64656661756c745f616972706f727473
+- SubqueryAlias spark_catalog.default.airports
+- Relation spark_catalog.default.airports[id#5524,type#5525,name#5526,lat#5527,lon#5528,elev#5529,continent#5530,country#5531,region#5532,city#5533,iata#5534,code#5535,gps#5536] parquet
== Optimized Logical Plan ==
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_25#5523 DESC NULLS LAST], true
+- Project [id#5524 AS C_12#5510, type#5525 AS C_13#5511, name#5526 AS C_14#5512, (round((lat#5527 * 1000.0), 0) / 1000.0) AS C_17#5513, (round((lon#5528 * 1000.0), 0) / 1000.0) AS C_16#5514, (round((elev#5529 * 1000.0), 0) / 1000.0) AS C_18#5515, continent#5530 AS C_19#5516, country#5531 AS C_15#5517, region#5532 AS C_20#5518, city#5533 AS C_24#5519, iata#5534 AS C_21#5520, code#5535 AS C_23#5521, gps#5536 AS C_22#5522, elev#5529 AS C_25#5523]
+- Filter ((isnotnull(lon#5528) AND isnotnull(lat#5527)) AND (((lon#5528 <= -104.05) AND (lon#5528 >= -111.05)) AND ((lat#5527 >= 41.0) AND (lat#5527 <= 45.0))))
+- Relation spark_catalog.default.airports[id#5524,type#5525,name#5526,lat#5527,lon#5528,elev#5529,continent#5530,country#5531,region#5532,city#5533,iata#5534,code#5535,gps#5536] parquet
== Physical Plan ==
TakeOrderedAndProject(limit=5, orderBy=[C_25#5523 DESC NULLS LAST], output=[C_12#5510,C_13#5511,C_14#5512,C_17#5513,C_16#5514,C_18#5515,C_19#5516,C_15#5517,C_20#5518,C_24#5519,C_21#5520,C_23#5521,C_22#5522,C_25#5523])
+- *(1) Project [id#5524 AS C_12#5510, type#5525 AS C_13#5511, name#5526 AS C_14#5512, (round((lat#5527 * 1000.0), 0) / 1000.0) AS C_17#5513, (round((lon#5528 * 1000.0), 0) / 1000.0) AS C_16#5514, (round((elev#5529 * 1000.0), 0) / 1000.0) AS C_18#5515, continent#5530 AS C_19#5516, country#5531 AS C_15#5517, region#5532 AS C_20#5518, city#5533 AS C_24#5519, iata#5534 AS C_21#5520, code#5535 AS C_23#5521, gps#5536 AS C_22#5522, elev#5529 AS C_25#5523]
+- *(1) Filter (((((isnotnull(lon#5528) AND isnotnull(lat#5527)) AND (lon#5528 <= -104.05)) AND (lon#5528 >= -111.05)) AND (lat#5527 >= 41.0)) AND (lat#5527 <= 45.0))
+- *(1) ColumnarToRow
+- FileScan parquet spark_catalog.default.airports[id#5524,type#5525,name#5526,lat#5527,lon#5528,elev#5529,continent#5530,country#5531,region#5532,city#5533,iata#5534,code#5535,gps#5536] Batched: true, DataFilters: [isnotnull(lon#5528), isnotnull(lat#5527), (lon#5528 <= -104.05), (lon#5528 >= -111.05), (lat#552..., Format: Parquet, Location: InMemoryFileIndex(1 paths)[file:/home/acdcadmin/spark-warehouse/airports], PartitionFilters: [], PushedFilters: [IsNotNull(lon), IsNotNull(lat), LessThanOrEqual(lon,-104.05), GreaterThanOrEqual(lon,-111.05), G..., ReadSchema: struct<id:string,type:string,name:string,lat:double,lon:double,elev:double,continent:string,count...
|
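This is the one real query in this stretch of the log: a bounding-box filter on airports (lon in [-111.05, -104.05], lat in [41, 45], roughly Wyoming), with lat/lon/elev rounded to three decimals via the round(x * 10^3) / 10^3 idiom, ordered by elevation descending with LIMIT 5. The physical plan shows the two optimizations that matter: the range predicates are pushed into the Parquet scan (PushedFilters), and Sort plus Limit fuse into a single TakeOrderedAndProject, so no full sort is materialized. A DataFrame sketch of the same query, assuming an existing SparkSession named spark (column aliases are illustrative):

import org.apache.spark.sql.functions._
import spark.implicits._

val top5 = spark.table("default.airports")
  .where($"lon".between(-111.05, -104.05) && $"lat".between(41.0, 45.0))
  .select(
    $"id", $"type", $"name",
    (round($"lat" * 1000, 0) / 1000).as("lat3"),   // the round(x*10^3)/10^3 idiom
    (round($"lon" * 1000, 0) / 1000).as("lon3"),
    (round($"elev" * 1000, 0) / 1000).as("elev3"),
    $"continent", $"country", $"region", $"city",
    $"iata", $"code", $"gps", $"elev")
  .orderBy($"elev".desc)
  .limit(5)

// Expect TakeOrderedAndProject over a filtered Parquet scan,
// matching the physical plan logged above.
top5.explain("extended")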
jonathan
|
|
5e172aa2-fe08-44e3-82f2-ed6ee889f80d
|
2025/06/13 23:35:50
|
2025/06/13 23:35:50
|
2025/06/13 23:35:50
|
89 ms
|
165 ms
|
DESCRIBE default.alltypes
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#4163, data_type#4164, comment#4165]
+- 'UnresolvedTableOrView [default, alltypes], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#4163, data_type#4164, comment#4165]
== Optimized Logical Plan ==
CommandResult [col_name#4163, data_type#4164, comment#4165], Execute DescribeTableCommand, [[STRING,string,null], [DOUBLE,double,null], [INTEGER,int,null], [BIGINT,bigint,null], [FLOAT,float,null], [DECIMAL,decimal(10,2),null], [NUMBER,decimal(10,2),null], [BOOLEAN,boolean,null], [DATE,date,null], [TIMESTAMP,timestamp,null], [DATETIME,timestamp,null], [BINARY,binary,null], [ARRAY,array<int>,null], [MAP,map<string,string>,null], [STRUCT,struct<field1:string,field2:int>,null], [VARCHAR,string,null], [CHAR,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#4163, data_type#4164, comment#4165]
== Physical Plan ==
CommandResult [col_name#4163, data_type#4164, comment#4165]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#4163, data_type#4164, comment#4165]
|
jonathon
|
|
5e93842a-edd0-4428-9cfc-46c4b8080284
|
2025/06/15 06:43:48
|
2025/06/15 06:43:48
|
2025/06/15 06:43:48
|
84 ms
|
180 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#5327, data_type#5328, comment#5329]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5327, data_type#5328, comment#5329]
== Optimized Logical Plan ==
CommandResult [col_name#5327, data_type#5328, comment#5329], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5327, data_type#5328, comment#5329]
== Physical Plan ==
CommandResult [col_name#5327, data_type#5328, comment#5329]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5327, data_type#5328, comment#5329]
|
jonathon
|
[36]
|
5ee2818f-b651-41c5-ad86-413b14b0a699
|
2025/06/13 22:37:48
|
2025/06/13 22:37:48
|
2025/06/13 22:37:48
|
186 ms
|
282 ms
|
SELECT C_43 AS C_16, C_3 AS C_17, C_2 AS C_15, C_4331 AS C_21, C_4332 AS C_22, C_4333 AS C_19, C_7 AS C_18, C_6 AS C_20, C_9 AS C_23, C_8 AS C_25, C_5 AS C_24, C_11 AS C_13, C_10 AS C_12, C_1 AS C_14 FROM (SELECT C_64656661756c745f616972706f727473.`id` AS C_43, C_64656661756c745f616972706f727473.`type` AS C_3, C_64656661756c745f616972706f727473.`name` AS C_2, C_64656661756c745f616972706f727473.`lat` AS C_0, C_64656661756c745f616972706f727473.`lon` AS C_4, C_64656661756c745f616972706f727473.`elev` AS C_1, C_64656661756c745f616972706f727473.`continent` AS C_7, C_64656661756c745f616972706f727473.`country` AS C_6, C_64656661756c745f616972706f727473.`region` AS C_9, C_64656661756c745f616972706f727473.`city` AS C_8, C_64656661756c745f616972706f727473.`iata` AS C_5, C_64656661756c745f616972706f727473.`code` AS C_11, C_64656661756c745f616972706f727473.`gps` AS C_10, (round((C_64656661756c745f616972706f727473.`lat` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4331, (round((C_64656661756c745f616972706f727473.`lon` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4332, (round((C_64656661756c745f616972706f727473.`elev` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4333 FROM `default`.`airports` C_64656661756c745f616972706f727473 WHERE ((C_64656661756c745f616972706f727473.`lon` <= (- 1.040500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lon` >= (- 1.110500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lat` >= 4.100000000000000E+001) AND (C_64656661756c745f616972706f727473.`lat` <= 4.500000000000000E+001)) ) C_4954424c ORDER BY C_14 DESC LIMIT 5
|
CLOSED
|
== Parsed Logical Plan ==
'GlobalLimit 5
+- 'LocalLimit 5
+- 'Sort ['C_14 DESC NULLS LAST], true
+- 'Project ['C_43 AS C_16#2660, 'C_3 AS C_17#2661, 'C_2 AS C_15#2662, 'C_4331 AS C_21#2663, 'C_4332 AS C_22#2664, 'C_4333 AS C_19#2665, 'C_7 AS C_18#2666, 'C_6 AS C_20#2667, 'C_9 AS C_23#2668, 'C_8 AS C_25#2669, 'C_5 AS C_24#2670, 'C_11 AS C_13#2671, 'C_10 AS C_12#2672, 'C_1 AS C_14#2673]
+- 'SubqueryAlias C_4954424c
+- 'Project ['C_64656661756c745f616972706f727473.id AS C_43#2644, 'C_64656661756c745f616972706f727473.type AS C_3#2645, 'C_64656661756c745f616972706f727473.name AS C_2#2646, 'C_64656661756c745f616972706f727473.lat AS C_0#2647, 'C_64656661756c745f616972706f727473.lon AS C_4#2648, 'C_64656661756c745f616972706f727473.elev AS C_1#2649, 'C_64656661756c745f616972706f727473.continent AS C_7#2650, 'C_64656661756c745f616972706f727473.country AS C_6#2651, 'C_64656661756c745f616972706f727473.region AS C_9#2652, 'C_64656661756c745f616972706f727473.city AS C_8#2653, 'C_64656661756c745f616972706f727473.iata AS C_5#2654, 'C_64656661756c745f616972706f727473.code AS C_11#2655, 'C_64656661756c745f616972706f727473.gps AS C_10#2656, ('round(('C_64656661756c745f616972706f727473.lat * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4331#2657, ('round(('C_64656661756c745f616972706f727473.lon * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4332#2658, ('round(('C_64656661756c745f616972706f727473.elev * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4333#2659]
+- 'Filter ((('C_64656661756c745f616972706f727473.lon <= -104.05) AND ('C_64656661756c745f616972706f727473.lon >= -111.05)) AND (('C_64656661756c745f616972706f727473.lat >= 41.0) AND ('C_64656661756c745f616972706f727473.lat <= 45.0)))
+- 'SubqueryAlias C_64656661756c745f616972706f727473
+- 'UnresolvedRelation [default, airports], [], false
== Analyzed Logical Plan ==
C_16: string, C_17: string, C_15: string, C_21: double, C_22: double, C_19: double, C_18: string, C_20: string, C_23: string, C_25: string, C_24: string, C_13: string, C_12: string, C_14: double
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_14#2673 DESC NULLS LAST], true
+- Project [C_43#2644 AS C_16#2660, C_3#2645 AS C_17#2661, C_2#2646 AS C_15#2662, C_4331#2657 AS C_21#2663, C_4332#2658 AS C_22#2664, C_4333#2659 AS C_19#2665, C_7#2650 AS C_18#2666, C_6#2651 AS C_20#2667, C_9#2652 AS C_23#2668, C_8#2653 AS C_25#2669, C_5#2654 AS C_24#2670, C_11#2655 AS C_13#2671, C_10#2656 AS C_12#2672, C_1#2649 AS C_14#2673]
+- SubqueryAlias C_4954424c
+- Project [id#2674 AS C_43#2644, type#2675 AS C_3#2645, name#2676 AS C_2#2646, lat#2677 AS C_0#2647, lon#2678 AS C_4#2648, elev#2679 AS C_1#2649, continent#2680 AS C_7#2650, country#2681 AS C_6#2651, region#2682 AS C_9#2652, city#2683 AS C_8#2653, iata#2684 AS C_5#2654, code#2685 AS C_11#2655, gps#2686 AS C_10#2656, (round((lat#2677 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4331#2657, (round((lon#2678 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4332#2658, (round((elev#2679 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4333#2659]
+- Filter (((lon#2678 <= -104.05) AND (lon#2678 >= -111.05)) AND ((lat#2677 >= 41.0) AND (lat#2677 <= 45.0)))
+- SubqueryAlias C_64656661756c745f616972706f727473
+- SubqueryAlias spark_catalog.default.airports
+- Relation spark_catalog.default.airports[id#2674,type#2675,name#2676,lat#2677,lon#2678,elev#2679,continent#2680,country#2681,region#2682,city#2683,iata#2684,code#2685,gps#2686] parquet
== Optimized Logical Plan ==
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_14#2673 DESC NULLS LAST], true
+- Project [id#2674 AS C_16#2660, type#2675 AS C_17#2661, name#2676 AS C_15#2662, (round((lat#2677 * 1000.0), 0) / 1000.0) AS C_21#2663, (round((lon#2678 * 1000.0), 0) / 1000.0) AS C_22#2664, (round((elev#2679 * 1000.0), 0) / 1000.0) AS C_19#2665, continent#2680 AS C_18#2666, country#2681 AS C_20#2667, region#2682 AS C_23#2668, city#2683 AS C_25#2669, iata#2684 AS C_24#2670, code#2685 AS C_13#2671, gps#2686 AS C_12#2672, elev#2679 AS C_14#2673]
+- Filter ((isnotnull(lon#2678) AND isnotnull(lat#2677)) AND (((lon#2678 <= -104.05) AND (lon#2678 >= -111.05)) AND ((lat#2677 >= 41.0) AND (lat#2677 <= 45.0))))
+- Relation spark_catalog.default.airports[id#2674,type#2675,name#2676,lat#2677,lon#2678,elev#2679,continent#2680,country#2681,region#2682,city#2683,iata#2684,code#2685,gps#2686] parquet
== Physical Plan ==
TakeOrderedAndProject(limit=5, orderBy=[C_14#2673 DESC NULLS LAST], output=[C_16#2660,C_17#2661,C_15#2662,C_21#2663,C_22#2664,C_19#2665,C_18#2666,C_20#2667,C_23#2668,C_25#2669,C_24#2670,C_13#2671,C_12#2672,C_14#2673])
+- *(1) Project [id#2674 AS C_16#2660, type#2675 AS C_17#2661, name#2676 AS C_15#2662, (round((lat#2677 * 1000.0), 0) / 1000.0) AS C_21#2663, (round((lon#2678 * 1000.0), 0) / 1000.0) AS C_22#2664, (round((elev#2679 * 1000.0), 0) / 1000.0) AS C_19#2665, continent#2680 AS C_18#2666, country#2681 AS C_20#2667, region#2682 AS C_23#2668, city#2683 AS C_25#2669, iata#2684 AS C_24#2670, code#2685 AS C_13#2671, gps#2686 AS C_12#2672, elev#2679 AS C_14#2673]
+- *(1) Filter (((((isnotnull(lon#2678) AND isnotnull(lat#2677)) AND (lon#2678 <= -104.05)) AND (lon#2678 >= -111.05)) AND (lat#2677 >= 41.0)) AND (lat#2677 <= 45.0))
+- *(1) ColumnarToRow
+- FileScan parquet spark_catalog.default.airports[id#2674,type#2675,name#2676,lat#2677,lon#2678,elev#2679,continent#2680,country#2681,region#2682,city#2683,iata#2684,code#2685,gps#2686] Batched: true, DataFilters: [isnotnull(lon#2678), isnotnull(lat#2677), (lon#2678 <= -104.05), (lon#2678 >= -111.05), (lat#267..., Format: Parquet, Location: InMemoryFileIndex(1 paths)[file:/home/acdcadmin/spark-warehouse/airports], PartitionFilters: [], PushedFilters: [IsNotNull(lon), IsNotNull(lat), LessThanOrEqual(lon,-104.05), GreaterThanOrEqual(lon,-111.05), G..., ReadSchema: struct<id:string,type:string,name:string,lat:double,lon:double,elev:double,continent:string,count...
|
jonathan
|
|
5f9e9c75-faf6-4f50-9159-5ef4655a51ee
|
2025/06/13 22:39:07
|
2025/06/13 22:39:07
|
2025/06/13 22:39:07
|
19 ms
|
331 ms
|
SHOW TABLES IN `test`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#2777, tableName#2778, isTemporary#2779]
+- 'UnresolvedNamespace [test]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#2777, tableName#2778, isTemporary#2779]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [test]
== Optimized Logical Plan ==
CommandResult [namespace#2777, tableName#2778, isTemporary#2779], ShowTables [namespace#2777, tableName#2778, isTemporary#2779], V2SessionCatalog(spark_catalog), [test]
+- ShowTables [namespace#2777, tableName#2778, isTemporary#2779]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [test]
== Physical Plan ==
CommandResult <empty>, [namespace#2777, tableName#2778, isTemporary#2779]
+- ShowTables [namespace#2777, tableName#2778, isTemporary#2779], V2SessionCatalog(spark_catalog), [test]
|
jonathon
|
|
624cd874-eef9-451a-a076-8c63b4fc3a92
|
2025/06/14 06:13:09
|
2025/06/14 06:13:09
|
2025/06/14 06:13:09
|
86 ms
|
184 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#5016, data_type#5017, comment#5018]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5016, data_type#5017, comment#5018]
== Optimized Logical Plan ==
CommandResult [col_name#5016, data_type#5017, comment#5018], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5016, data_type#5017, comment#5018]
== Physical Plan ==
CommandResult [col_name#5016, data_type#5017, comment#5018]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#5016, data_type#5017, comment#5018]
|
jonathan
|
|
639315b9-5fb3-48fb-a136-577613f544f8
|
2025/06/13 22:54:11
|
2025/06/13 22:54:11
|
2025/06/13 22:54:11
|
48 ms
|
318 ms
|
SHOW TABLES IN `default`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#3041, tableName#3042, isTemporary#3043]
+- 'UnresolvedNamespace [default]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#3041, tableName#3042, isTemporary#3043]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [default]
== Optimized Logical Plan ==
CommandResult [namespace#3041, tableName#3042, isTemporary#3043], ShowTables [namespace#3041, tableName#3042, isTemporary#3043], V2SessionCatalog(spark_catalog), [default], [[0,2000000007,2800000008,0,746c7561666564,7374726f70726961], [0,2000000007,2800000008,0,746c7561666564,73657079746c6c61], [0,2000000007,2800000009,0,746c7561666564,73657079746c6c61,32], [0,2000000007,280000000d,0,746c7561666564,73657079746c6c61,6369736162], [0,2000000007,280000000e,0,746c7561666564,73657079746c6c61,326369736162], [0,2000000007,2800000009,0,746c7561666564,7079747961727261,65], [0,2000000007,280000000a,0,746c7561666564,7974746e69676962,6570], [0,2000000007,280000000a,0,746c7561666564,79747972616e6962,6570], [0,2000000007,2800000008,0,746c7561666564,6570797465746164], [0,2000000007,280000000b,0,746c7561666564,746c616d69636564,657079], [0,2000000007,2800000009,0,746c7561666564,70797474616f6c66,65], [0,2000000007,2800000008,0,746c7561666564,736570797470616d], [0,2000000007,280000000b,0,746c7561666564,646978617463796e,617461], [0,2000000007,280000000f,0,746c7561666564,746978617463796e,61746164706972], [0,2000000007,2800000010,0,746c7561666564,7365745f656d6f73,32656c6261745f74], [0,2000000007,280000000a,0,746c7561666564,7974746375727473,6570], [0,2000000007,280000000e,0,746c7561666564,656e6f7a69786174,70756b6f6f6c], [0,2000000007,280000000c,0,746c7561666564,74676e696b726f77,73657079], [0,2000000007,2800000016,0,746c7561666564,74676e696b726f77,6874697773657079,7265626d756e]]
+- ShowTables [namespace#3041, tableName#3042, isTemporary#3043]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [default]
== Physical Plan ==
CommandResult [namespace#3041, tableName#3042, isTemporary#3043]
+- ShowTables [namespace#3041, tableName#3042, isTemporary#3043], V2SessionCatalog(spark_catalog), [default]
|
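(An aside on the opaque identifiers in these records, inferred from the log rather than stated by it: the C_-prefixed aliases are hex-encoded names, e.g. C_64656661756c745f616972706f727473 decodes to default_airports and C_4954424c to ITBL, while the hex words inside the ShowTables CommandResult above read as byte-reversed chunks: 746c7561666564 is 'default', 7374726f70726961 is 'airports', 73657079746c6c61 is 'alltypes'. A minimal Spark SQL sketch to verify, using only the built-ins unhex, decode, and reverse:

-- straight hex decode of a C_ alias:
SELECT decode(unhex('64656661756c745f616972706f727473'), 'UTF-8');  -- default_airports
-- the CommandResult words are byte-reversed; reversing the decoded ASCII recovers the name:
SELECT reverse(decode(unhex('7374726f70726961'), 'UTF-8'));         -- airports
)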
jonathon
|
|
63f33d59-8ed5-4bfc-a086-5532e1e184fa
|
2025/06/13 07:16:51
|
2025/06/13 07:16:51
|
2025/06/13 07:16:51
|
32 ms
|
126 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
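(The 'Listing columns' entries carry no statement or plan because they appear to be client metadata calls rather than SQL text; the shape matches a JDBC getColumns request, though that reading is an inference from the log. The nearest plain-SQL equivalent, for anyone replaying this by hand:

-- roughly what a metadata 'Listing columns' call covers, as a SQL statement:
SHOW COLUMNS IN default.airports;
)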
jonathon
|
|
6504b11a-5997-41dd-9b7a-85372e54da21
|
2025/06/13 23:38:36
|
2025/06/13 23:38:36
|
2025/06/13 23:38:36
|
24 ms
|
120 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathon
|
|
6750655f-303f-420e-9925-ca0cac7dee66
|
2025/06/14 01:23:34
|
2025/06/14 01:23:34
|
2025/06/14 01:23:35
|
91 ms
|
245 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#4463, data_type#4464, comment#4465]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4463, data_type#4464, comment#4465]
== Optimized Logical Plan ==
CommandResult [col_name#4463, data_type#4464, comment#4465], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4463, data_type#4464, comment#4465]
== Physical Plan ==
CommandResult [col_name#4463, data_type#4464, comment#4465]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4463, data_type#4464, comment#4465]
|
jonathon
|
|
680ed7d0-342a-4620-b3a1-2f57b0058141
|
2025/06/13 22:18:18
|
2025/06/13 22:18:18
|
2025/06/13 22:18:18
|
27 ms
|
122 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathan
|
|
69bf0b73-65ae-4ec0-b457-1ad9a9ab7c3f
|
2025/06/13 23:27:02
|
2025/06/13 23:27:02
|
2025/06/13 23:27:02
|
30 ms
|
363 ms
|
SHOW TABLES IN `c3ba675f1fb64660ba4a90155b35924e`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#3728, tableName#3729, isTemporary#3730]
+- 'UnresolvedNamespace [c3ba675f1fb64660ba4a90155b35924e]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#3728, tableName#3729, isTemporary#3730]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [c3ba675f1fb64660ba4a90155b35924e]
== Optimized Logical Plan ==
CommandResult [namespace#3728, tableName#3729, isTemporary#3730], ShowTables [namespace#3728, tableName#3729, isTemporary#3730], V2SessionCatalog(spark_catalog), [c3ba675f1fb64660ba4a90155b35924e], [[0,2000000020,400000000c,0,6635373661623363,3036363436626631,3531303961346162,6534323935336235,69746e656469796d,72656966]]
+- ShowTables [namespace#3728, tableName#3729, isTemporary#3730]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [c3ba675f1fb64660ba4a90155b35924e]
== Physical Plan ==
CommandResult [namespace#3728, tableName#3729, isTemporary#3730]
+- ShowTables [namespace#3728, tableName#3729, isTemporary#3730], V2SessionCatalog(spark_catalog), [c3ba675f1fb64660ba4a90155b35924e]
|
jonathon
|
|
6e627648-02c6-4e67-a474-ba16f6865bfb
|
2025/06/14 06:31:58
|
2025/06/14 06:31:58
|
2025/06/14 06:31:58
|
53 ms
|
147 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathon
|
|
71efc595-91a8-4084-ad08-c1f50a5b6952
|
2025/06/13 23:20:56
|
2025/06/13 23:20:56
|
2025/06/13 23:20:56
|
80 ms
|
177 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#3284, data_type#3285, comment#3286]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#3284, data_type#3285, comment#3286]
== Optimized Logical Plan ==
CommandResult [col_name#3284, data_type#3285, comment#3286], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#3284, data_type#3285, comment#3286]
== Physical Plan ==
CommandResult [col_name#3284, data_type#3285, comment#3286]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#3284, data_type#3285, comment#3286]
|
jonathon
|
|
73745f82-25b8-47d8-967b-3efa1b283367
|
2025/06/15 06:45:37
|
2025/06/15 06:45:37
|
2025/06/15 06:45:37
|
33 ms
|
262 ms
|
set -v
|
CLOSED
|
== Parsed Logical Plan ==
SetCommand (-v,None)
== Analyzed Logical Plan ==
key: string, value: string, meaning: string, Since version: string
SetCommand (-v,None)
== Optimized Logical Plan ==
CommandResult [key#5423, value#5424, meaning#5425, Since version#5426], Execute SetCommand, [[spark.sql.adaptive.advisoryPartitionSizeInBytes,<value of spark.sql.adaptive.shuffle.targetPostShuffleInputSize>,The advisory size in bytes of the shuffle partition during adaptive optimization (when spark.sql.adaptive.enabled is true). It takes effect when Spark coalesces small shuffle partitions or splits skewed shuffle partition.,3.0.0], [spark.sql.adaptive.autoBroadcastJoinThreshold,<undefined>,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. The default value is same with spark.sql.autoBroadcastJoinThreshold. Note that, this config is used only in adaptive framework.,3.2.0], [spark.sql.adaptive.coalescePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will coalesce contiguous shuffle partitions according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid too many small tasks.,3.0.0], [spark.sql.adaptive.coalescePartitions.initialPartitionNum,<undefined>,The initial number of shuffle partitions before coalescing. If not set, it equals to spark.sql.shuffle.partitions. This configuration only has an effect when 'spark.sql.adaptive.enabled' and 'spark.sql.adaptive.coalescePartitions.enabled' are both true.,3.0.0], [spark.sql.adaptive.coalescePartitions.minPartitionSize,1MB,The minimum size of shuffle partitions after coalescing. This is useful when the adaptively calculated target size is too small during partition coalescing.,3.2.0], [spark.sql.adaptive.coalescePartitions.parallelismFirst,true,When true, Spark does not respect the target size specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes' (default 64MB) when coalescing contiguous shuffle partitions, but adaptively calculate the target size according to the default parallelism of the Spark cluster. The calculated size is usually smaller than the configured target size. This is to maximize the parallelism and avoid performance regression when enabling adaptive query execution. It's recommended to set this config to false and respect the configured target size.,3.2.0], [spark.sql.adaptive.customCostEvaluatorClass,<undefined>,The custom cost evaluator class to be used for adaptive execution. If not being set, Spark will use its own SimpleCostEvaluator by default.,3.2.0], [spark.sql.adaptive.enabled,true,When true, enable adaptive query execution, which re-optimizes the query plan in the middle of query execution, based on accurate runtime statistics.,1.6.0], [spark.sql.adaptive.forceOptimizeSkewedJoin,false,When true, force enable OptimizeSkewedJoin even if it introduces extra shuffle.,3.3.0], [spark.sql.adaptive.localShuffleReader.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark tries to use local shuffle reader to read the shuffle data when the shuffle partitioning is not needed, for example, after converting sort-merge join to broadcast-hash join.,3.0.0], [spark.sql.adaptive.maxShuffledHashJoinLocalMapThreshold,0b,Configures the maximum size in bytes per partition that can be allowed to build local hash map. 
If this value is not smaller than spark.sql.adaptive.advisoryPartitionSizeInBytes and all the partition size are not larger than this config, join selection prefer to use shuffled hash join instead of sort merge join regardless of the value of spark.sql.join.preferSortMergeJoin.,3.2.0], [spark.sql.adaptive.optimizeSkewsInRebalancePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will optimize the skewed shuffle partitions in RebalancePartitions and split them to smaller ones according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid data skew.,3.2.0], [spark.sql.adaptive.optimizer.excludedRules,<undefined>,Configures a list of rules to be disabled in the adaptive optimizer, in which the rules are specified by their rule names and separated by comma. The optimizer will log the rules that have indeed been excluded.,3.1.0], [spark.sql.adaptive.rebalancePartitionsSmallPartitionFactor,0.2,A partition will be merged during splitting if its size is small than this factor multiply spark.sql.adaptive.advisoryPartitionSizeInBytes.,3.3.0], [spark.sql.adaptive.skewJoin.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark dynamically handles skew in shuffled join (sort-merge and shuffled hash) by splitting (and replicating if needed) skewed partitions.,3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionFactor,5.0,A partition is considered as skewed if its size is larger than this factor multiplying the median partition size and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes',3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes,256MB,A partition is considered as skewed if its size in bytes is larger than this threshold and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionFactor' multiplying the median partition size. Ideally this config should be set larger than 'spark.sql.adaptive.advisoryPartitionSizeInBytes'.,3.0.0], [spark.sql.allowNamedFunctionArguments,true,If true, Spark will turn on support for named parameters for all functions that has it implemented.,3.5.0], [spark.sql.ansi.doubleQuotedIdentifiers,false,When true and 'spark.sql.ansi.enabled' is true, Spark SQL reads literals enclosed in double quoted (") as identifiers. When false they are read as string literals.,3.4.0], [spark.sql.ansi.enabled,false,When true, Spark SQL uses an ANSI compliant dialect instead of being Hive compliant. For example, Spark will throw an exception at runtime instead of returning null results when the inputs to a SQL operator/function are invalid.For full details of this dialect, you can find them in the section "ANSI Compliance" of Spark's documentation. Some ANSI dialect features may be not from the ANSI SQL standard directly, but their behaviors align with ANSI SQL's style,3.0.0], [spark.sql.ansi.enforceReservedKeywords,false,When true and 'spark.sql.ansi.enabled' is true, the Spark SQL parser enforces the ANSI reserved keywords and forbids SQL queries that use reserved keywords as alias names and/or identifiers for table, view, function, etc.,3.3.0], [spark.sql.ansi.relationPrecedence,false,When true and 'spark.sql.ansi.enabled' is true, JOIN takes precedence over comma when combining relation. For example, `t1, t2 JOIN t3` should result to `t1 X (t2 X t3)`. 
If the config is false, the result is `(t1 X t2) X t3`.,3.4.0], [spark.sql.autoBroadcastJoinThreshold,10MB,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. Note that currently statistics are only supported for Hive Metastore tables where the command `ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan` has been run, and file-based data source tables where the statistics are computed directly on the files of data.,1.1.0], [spark.sql.avro.compression.codec,snappy,Compression codec used in writing of AVRO files. Supported codecs: uncompressed, deflate, snappy, bzip2, xz and zstandard. Default codec is snappy.,2.4.0], ... 183 more fields]
+- SetCommand (-v,None)
== Physical Plan ==
CommandResult [key#5423, value#5424, meaning#5425, Since version#5426]
+- Execute SetCommand
+- SetCommand (-v,None)
|
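(For reference, set -v — SetCommand (-v,None) in the plan — dumps every SQL configuration together with its value, meaning, and the Spark version that introduced it, which is why the CommandResult above trails off with "... 183 more fields". Individual keys from that listing can be read or overridden the same way:

-- read a single key listed above (no -v needed):
SET spark.sql.adaptive.enabled;
-- override it for this session:
SET spark.sql.adaptive.enabled=false;
)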
jonathon
|
[40]
|
73e233e0-56bd-41c0-8dba-7883655f69f6
|
2025/06/13 23:29:59
|
2025/06/13 23:29:59
|
2025/06/13 23:29:59
|
192 ms
|
380 ms
|
SELECT C_1 AS C_20, C_2 AS C_18, C_3 AS C_21, C_4331 AS C_22, C_4332 AS C_25, C_4333 AS C_19, C_0 AS C_17, C_43 AS C_12, C_10 AS C_15, C_8 AS C_23, C_7 AS C_13, C_9 AS C_16, C_11 AS C_14, C_6 AS C_24 FROM (SELECT C_64656661756c745f616972706f727473.`id` AS C_1, C_64656661756c745f616972706f727473.`type` AS C_2, C_64656661756c745f616972706f727473.`name` AS C_3, C_64656661756c745f616972706f727473.`lat` AS C_4, C_64656661756c745f616972706f727473.`lon` AS C_5, C_64656661756c745f616972706f727473.`elev` AS C_6, C_64656661756c745f616972706f727473.`continent` AS C_0, C_64656661756c745f616972706f727473.`country` AS C_43, C_64656661756c745f616972706f727473.`region` AS C_10, C_64656661756c745f616972706f727473.`city` AS C_8, C_64656661756c745f616972706f727473.`iata` AS C_7, C_64656661756c745f616972706f727473.`code` AS C_9, C_64656661756c745f616972706f727473.`gps` AS C_11, (round((C_64656661756c745f616972706f727473.`lat` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4331, (round((C_64656661756c745f616972706f727473.`lon` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4332, (round((C_64656661756c745f616972706f727473.`elev` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4333 FROM `default`.`airports` C_64656661756c745f616972706f727473 WHERE ((C_64656661756c745f616972706f727473.`lon` <= (- 1.040500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lon` >= (- 1.110500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lat` >= 4.100000000000000E+001) AND (C_64656661756c745f616972706f727473.`lat` <= 4.500000000000000E+001)) ) C_4954424c ORDER BY C_24 DESC LIMIT 5
|
CLOSED
|
== Parsed Logical Plan ==
'GlobalLimit 5
+- 'LocalLimit 5
+- 'Sort ['C_24 DESC NULLS LAST], true
+- 'Project ['C_1 AS C_20#3895, 'C_2 AS C_18#3896, 'C_3 AS C_21#3897, 'C_4331 AS C_22#3898, 'C_4332 AS C_25#3899, 'C_4333 AS C_19#3900, 'C_0 AS C_17#3901, 'C_43 AS C_12#3902, 'C_10 AS C_15#3903, 'C_8 AS C_23#3904, 'C_7 AS C_13#3905, 'C_9 AS C_16#3906, 'C_11 AS C_14#3907, 'C_6 AS C_24#3908]
+- 'SubqueryAlias C_4954424c
+- 'Project ['C_64656661756c745f616972706f727473.id AS C_1#3879, 'C_64656661756c745f616972706f727473.type AS C_2#3880, 'C_64656661756c745f616972706f727473.name AS C_3#3881, 'C_64656661756c745f616972706f727473.lat AS C_4#3882, 'C_64656661756c745f616972706f727473.lon AS C_5#3883, 'C_64656661756c745f616972706f727473.elev AS C_6#3884, 'C_64656661756c745f616972706f727473.continent AS C_0#3885, 'C_64656661756c745f616972706f727473.country AS C_43#3886, 'C_64656661756c745f616972706f727473.region AS C_10#3887, 'C_64656661756c745f616972706f727473.city AS C_8#3888, 'C_64656661756c745f616972706f727473.iata AS C_7#3889, 'C_64656661756c745f616972706f727473.code AS C_9#3890, 'C_64656661756c745f616972706f727473.gps AS C_11#3891, ('round(('C_64656661756c745f616972706f727473.lat * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4331#3892, ('round(('C_64656661756c745f616972706f727473.lon * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4332#3893, ('round(('C_64656661756c745f616972706f727473.elev * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4333#3894]
+- 'Filter ((('C_64656661756c745f616972706f727473.lon <= -104.05) AND ('C_64656661756c745f616972706f727473.lon >= -111.05)) AND (('C_64656661756c745f616972706f727473.lat >= 41.0) AND ('C_64656661756c745f616972706f727473.lat <= 45.0)))
+- 'SubqueryAlias C_64656661756c745f616972706f727473
+- 'UnresolvedRelation [default, airports], [], false
== Analyzed Logical Plan ==
C_20: string, C_18: string, C_21: string, C_22: double, C_25: double, C_19: double, C_17: string, C_12: string, C_15: string, C_23: string, C_13: string, C_16: string, C_14: string, C_24: double
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_24#3908 DESC NULLS LAST], true
+- Project [C_1#3879 AS C_20#3895, C_2#3880 AS C_18#3896, C_3#3881 AS C_21#3897, C_4331#3892 AS C_22#3898, C_4332#3893 AS C_25#3899, C_4333#3894 AS C_19#3900, C_0#3885 AS C_17#3901, C_43#3886 AS C_12#3902, C_10#3887 AS C_15#3903, C_8#3888 AS C_23#3904, C_7#3889 AS C_13#3905, C_9#3890 AS C_16#3906, C_11#3891 AS C_14#3907, C_6#3884 AS C_24#3908]
+- SubqueryAlias C_4954424c
+- Project [id#3909 AS C_1#3879, type#3910 AS C_2#3880, name#3911 AS C_3#3881, lat#3912 AS C_4#3882, lon#3913 AS C_5#3883, elev#3914 AS C_6#3884, continent#3915 AS C_0#3885, country#3916 AS C_43#3886, region#3917 AS C_10#3887, city#3918 AS C_8#3888, iata#3919 AS C_7#3889, code#3920 AS C_9#3890, gps#3921 AS C_11#3891, (round((lat#3912 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4331#3892, (round((lon#3913 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4332#3893, (round((elev#3914 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4333#3894]
+- Filter (((lon#3913 <= -104.05) AND (lon#3913 >= -111.05)) AND ((lat#3912 >= 41.0) AND (lat#3912 <= 45.0)))
+- SubqueryAlias C_64656661756c745f616972706f727473
+- SubqueryAlias spark_catalog.default.airports
+- Relation spark_catalog.default.airports[id#3909,type#3910,name#3911,lat#3912,lon#3913,elev#3914,continent#3915,country#3916,region#3917,city#3918,iata#3919,code#3920,gps#3921] parquet
== Optimized Logical Plan ==
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_24#3908 DESC NULLS LAST], true
+- Project [id#3909 AS C_20#3895, type#3910 AS C_18#3896, name#3911 AS C_21#3897, (round((lat#3912 * 1000.0), 0) / 1000.0) AS C_22#3898, (round((lon#3913 * 1000.0), 0) / 1000.0) AS C_25#3899, (round((elev#3914 * 1000.0), 0) / 1000.0) AS C_19#3900, continent#3915 AS C_17#3901, country#3916 AS C_12#3902, region#3917 AS C_15#3903, city#3918 AS C_23#3904, iata#3919 AS C_13#3905, code#3920 AS C_16#3906, gps#3921 AS C_14#3907, elev#3914 AS C_24#3908]
+- Filter ((isnotnull(lon#3913) AND isnotnull(lat#3912)) AND (((lon#3913 <= -104.05) AND (lon#3913 >= -111.05)) AND ((lat#3912 >= 41.0) AND (lat#3912 <= 45.0))))
+- Relation spark_catalog.default.airports[id#3909,type#3910,name#3911,lat#3912,lon#3913,elev#3914,continent#3915,country#3916,region#3917,city#3918,iata#3919,code#3920,gps#3921] parquet
== Physical Plan ==
TakeOrderedAndProject(limit=5, orderBy=[C_24#3908 DESC NULLS LAST], output=[C_20#3895,C_18#3896,C_21#3897,C_22#3898,C_25#3899,C_19#3900,C_17#3901,C_12#3902,C_15#3903,C_23#3904,C_13#3905,C_16#3906,C_14#3907,C_24#3908])
+- *(1) Project [id#3909 AS C_20#3895, type#3910 AS C_18#3896, name#3911 AS C_21#3897, (round((lat#3912 * 1000.0), 0) / 1000.0) AS C_22#3898, (round((lon#3913 * 1000.0), 0) / 1000.0) AS C_25#3899, (round((elev#3914 * 1000.0), 0) / 1000.0) AS C_19#3900, continent#3915 AS C_17#3901, country#3916 AS C_12#3902, region#3917 AS C_15#3903, city#3918 AS C_23#3904, iata#3919 AS C_13#3905, code#3920 AS C_16#3906, gps#3921 AS C_14#3907, elev#3914 AS C_24#3908]
+- *(1) Filter (((((isnotnull(lon#3913) AND isnotnull(lat#3912)) AND (lon#3913 <= -104.05)) AND (lon#3913 >= -111.05)) AND (lat#3912 >= 41.0)) AND (lat#3912 <= 45.0))
+- *(1) ColumnarToRow
+- FileScan parquet spark_catalog.default.airports[id#3909,type#3910,name#3911,lat#3912,lon#3913,elev#3914,continent#3915,country#3916,region#3917,city#3918,iata#3919,code#3920,gps#3921] Batched: true, DataFilters: [isnotnull(lon#3913), isnotnull(lat#3912), (lon#3913 <= -104.05), (lon#3913 >= -111.05), (lat#391..., Format: Parquet, Location: InMemoryFileIndex(1 paths)[file:/home/acdcadmin/spark-warehouse/airports], PartitionFilters: [], PushedFilters: [IsNotNull(lon), IsNotNull(lat), LessThanOrEqual(lon,-104.05), GreaterThanOrEqual(lon,-111.05), G..., ReadSchema: struct<id:string,type:string,name:string,lat:double,lon:double,elev:double,continent:string,count...
|
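(The round((col * power(10.0, 3)), 0) / power(10.0, 3) pattern in these generated statements is round-to-three-decimals, and the Optimized Logical Plan shows the optimizer constant-folding power(10.0, 3) into the literal 1000.0. A quick check with a made-up latitude:

-- as written by the client:
SELECT round(41.139998 * power(10.0, 3), 0) / power(10.0, 3);  -- 41.14
-- as the optimizer rewrites it:
SELECT round(41.139998 * 1000.0, 0) / 1000.0;                  -- 41.14
)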
jonathan
|
|
79d4a8cf-f37e-4a5d-a268-9892c179688f
|
2025/06/13 23:34:51
|
2025/06/13 23:34:51
|
2025/06/13 23:34:52
|
10 ms
|
319 ms
|
SHOW TABLES IN `global_temp`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#4128, tableName#4129, isTemporary#4130]
+- 'UnresolvedNamespace [global_temp]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#4128, tableName#4129, isTemporary#4130]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [global_temp]
== Optimized Logical Plan ==
CommandResult [namespace#4128, tableName#4129, isTemporary#4130], ShowTables [namespace#4128, tableName#4129, isTemporary#4130], V2SessionCatalog(spark_catalog), [global_temp]
+- ShowTables [namespace#4128, tableName#4129, isTemporary#4130]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [global_temp]
== Physical Plan ==
CommandResult <empty>, [namespace#4128, tableName#4129, isTemporary#4130]
+- ShowTables [namespace#4128, tableName#4129, isTemporary#4130], V2SessionCatalog(spark_catalog), [global_temp]
|
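(SHOW TABLES IN `global_temp` yields CommandResult <empty> here simply because no global temporary views existed at that moment; the namespace only has entries after something like the following sketch, with a hypothetical view name:

-- hypothetical view; SHOW TABLES IN global_temp would then list it with isTemporary = true:
CREATE GLOBAL TEMPORARY VIEW wy_airports AS
  SELECT * FROM default.airports WHERE lat >= 41.0 AND lat <= 45.0;
SHOW TABLES IN global_temp;
)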
jonathon
|
|
7db97e51-8703-4fc1-8f61-2a343cb12304
|
2025/06/13 22:44:55
|
2025/06/13 22:44:55
|
2025/06/13 22:44:56
|
23 ms
|
132 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathon
|
|
7e52e6cd-b958-4c5c-b831-6a246dd98439
|
2025/06/13 23:20:55
|
2025/06/13 23:20:55
|
2025/06/13 23:20:55
|
232 ms
|
327 ms
|
Listing tables 'catalog : null, schemaPattern : %, tableTypes : null, tableName : %'
|
CLOSED
|
|
jonathan
|
|
7e70771a-b47a-45c0-be4f-ef4ae70a2a9d
|
2025/06/13 22:39:07
|
2025/06/13 22:39:07
|
2025/06/13 22:39:08
|
12 ms
|
322 ms
|
SHOW TABLES IN `global_temp`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#2787, tableName#2788, isTemporary#2789]
+- 'UnresolvedNamespace [global_temp]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#2787, tableName#2788, isTemporary#2789]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [global_temp]
== Optimized Logical Plan ==
CommandResult [namespace#2787, tableName#2788, isTemporary#2789], ShowTables [namespace#2787, tableName#2788, isTemporary#2789], V2SessionCatalog(spark_catalog), [global_temp]
+- ShowTables [namespace#2787, tableName#2788, isTemporary#2789]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [global_temp]
== Physical Plan ==
CommandResult <empty>, [namespace#2787, tableName#2788, isTemporary#2789]
+- ShowTables [namespace#2787, tableName#2788, isTemporary#2789], V2SessionCatalog(spark_catalog), [global_temp]
|
jonathan
|
|
8081e0a0-fa1f-4bd2-a749-eebb493b1c08
|
2025/06/13 22:51:51
|
2025/06/13 22:51:51
|
2025/06/13 22:51:51
|
29 ms
|
357 ms
|
Listing databases 'catalog : , schemaPattern : null'
|
CLOSED
|
|
jonathon
|
|
8085a422-355a-48ff-88af-508c1d5bdb35
|
2025/06/14 05:46:25
|
2025/06/14 05:46:25
|
2025/06/14 05:46:26
|
39 ms
|
272 ms
|
set -v
|
CLOSED
|
== Parsed Logical Plan ==
SetCommand (-v,None)
== Analyzed Logical Plan ==
key: string, value: string, meaning: string, Since version: string
SetCommand (-v,None)
== Optimized Logical Plan ==
CommandResult [key#4847, value#4848, meaning#4849, Since version#4850], Execute SetCommand, [[spark.sql.adaptive.advisoryPartitionSizeInBytes,<value of spark.sql.adaptive.shuffle.targetPostShuffleInputSize>,The advisory size in bytes of the shuffle partition during adaptive optimization (when spark.sql.adaptive.enabled is true). It takes effect when Spark coalesces small shuffle partitions or splits skewed shuffle partition.,3.0.0], [spark.sql.adaptive.autoBroadcastJoinThreshold,<undefined>,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. The default value is same with spark.sql.autoBroadcastJoinThreshold. Note that, this config is used only in adaptive framework.,3.2.0], [spark.sql.adaptive.coalescePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will coalesce contiguous shuffle partitions according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid too many small tasks.,3.0.0], [spark.sql.adaptive.coalescePartitions.initialPartitionNum,<undefined>,The initial number of shuffle partitions before coalescing. If not set, it equals to spark.sql.shuffle.partitions. This configuration only has an effect when 'spark.sql.adaptive.enabled' and 'spark.sql.adaptive.coalescePartitions.enabled' are both true.,3.0.0], [spark.sql.adaptive.coalescePartitions.minPartitionSize,1MB,The minimum size of shuffle partitions after coalescing. This is useful when the adaptively calculated target size is too small during partition coalescing.,3.2.0], [spark.sql.adaptive.coalescePartitions.parallelismFirst,true,When true, Spark does not respect the target size specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes' (default 64MB) when coalescing contiguous shuffle partitions, but adaptively calculate the target size according to the default parallelism of the Spark cluster. The calculated size is usually smaller than the configured target size. This is to maximize the parallelism and avoid performance regression when enabling adaptive query execution. It's recommended to set this config to false and respect the configured target size.,3.2.0], [spark.sql.adaptive.customCostEvaluatorClass,<undefined>,The custom cost evaluator class to be used for adaptive execution. If not being set, Spark will use its own SimpleCostEvaluator by default.,3.2.0], [spark.sql.adaptive.enabled,true,When true, enable adaptive query execution, which re-optimizes the query plan in the middle of query execution, based on accurate runtime statistics.,1.6.0], [spark.sql.adaptive.forceOptimizeSkewedJoin,false,When true, force enable OptimizeSkewedJoin even if it introduces extra shuffle.,3.3.0], [spark.sql.adaptive.localShuffleReader.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark tries to use local shuffle reader to read the shuffle data when the shuffle partitioning is not needed, for example, after converting sort-merge join to broadcast-hash join.,3.0.0], [spark.sql.adaptive.maxShuffledHashJoinLocalMapThreshold,0b,Configures the maximum size in bytes per partition that can be allowed to build local hash map. 
If this value is not smaller than spark.sql.adaptive.advisoryPartitionSizeInBytes and all the partition size are not larger than this config, join selection prefer to use shuffled hash join instead of sort merge join regardless of the value of spark.sql.join.preferSortMergeJoin.,3.2.0], [spark.sql.adaptive.optimizeSkewsInRebalancePartitions.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark will optimize the skewed shuffle partitions in RebalancePartitions and split them to smaller ones according to the target size (specified by 'spark.sql.adaptive.advisoryPartitionSizeInBytes'), to avoid data skew.,3.2.0], [spark.sql.adaptive.optimizer.excludedRules,<undefined>,Configures a list of rules to be disabled in the adaptive optimizer, in which the rules are specified by their rule names and separated by comma. The optimizer will log the rules that have indeed been excluded.,3.1.0], [spark.sql.adaptive.rebalancePartitionsSmallPartitionFactor,0.2,A partition will be merged during splitting if its size is small than this factor multiply spark.sql.adaptive.advisoryPartitionSizeInBytes.,3.3.0], [spark.sql.adaptive.skewJoin.enabled,true,When true and 'spark.sql.adaptive.enabled' is true, Spark dynamically handles skew in shuffled join (sort-merge and shuffled hash) by splitting (and replicating if needed) skewed partitions.,3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionFactor,5.0,A partition is considered as skewed if its size is larger than this factor multiplying the median partition size and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes',3.0.0], [spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes,256MB,A partition is considered as skewed if its size in bytes is larger than this threshold and also larger than 'spark.sql.adaptive.skewJoin.skewedPartitionFactor' multiplying the median partition size. Ideally this config should be set larger than 'spark.sql.adaptive.advisoryPartitionSizeInBytes'.,3.0.0], [spark.sql.allowNamedFunctionArguments,true,If true, Spark will turn on support for named parameters for all functions that has it implemented.,3.5.0], [spark.sql.ansi.doubleQuotedIdentifiers,false,When true and 'spark.sql.ansi.enabled' is true, Spark SQL reads literals enclosed in double quoted (") as identifiers. When false they are read as string literals.,3.4.0], [spark.sql.ansi.enabled,false,When true, Spark SQL uses an ANSI compliant dialect instead of being Hive compliant. For example, Spark will throw an exception at runtime instead of returning null results when the inputs to a SQL operator/function are invalid.For full details of this dialect, you can find them in the section "ANSI Compliance" of Spark's documentation. Some ANSI dialect features may be not from the ANSI SQL standard directly, but their behaviors align with ANSI SQL's style,3.0.0], [spark.sql.ansi.enforceReservedKeywords,false,When true and 'spark.sql.ansi.enabled' is true, the Spark SQL parser enforces the ANSI reserved keywords and forbids SQL queries that use reserved keywords as alias names and/or identifiers for table, view, function, etc.,3.3.0], [spark.sql.ansi.relationPrecedence,false,When true and 'spark.sql.ansi.enabled' is true, JOIN takes precedence over comma when combining relation. For example, `t1, t2 JOIN t3` should result to `t1 X (t2 X t3)`. 
If the config is false, the result is `(t1 X t2) X t3`.,3.4.0], [spark.sql.autoBroadcastJoinThreshold,10MB,Configures the maximum size in bytes for a table that will be broadcast to all worker nodes when performing a join. By setting this value to -1 broadcasting can be disabled. Note that currently statistics are only supported for Hive Metastore tables where the command `ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan` has been run, and file-based data source tables where the statistics are computed directly on the files of data.,1.1.0], [spark.sql.avro.compression.codec,snappy,Compression codec used in writing of AVRO files. Supported codecs: uncompressed, deflate, snappy, bzip2, xz and zstandard. Default codec is snappy.,2.4.0], ... 183 more fields]
+- SetCommand (-v,None)
== Physical Plan ==
CommandResult [key#4847, value#4848, meaning#4849, Since version#4850]
+- Execute SetCommand
+- SetCommand (-v,None)
|
jonathon
|
|
8114f5b7-6ee0-40f1-942f-9ea912c309c5
|
2025/06/15 06:48:31
|
2025/06/15 06:48:31
|
2025/06/15 06:48:31
|
23 ms
|
116 ms
|
Listing columns 'catalog : null, schemaPattern : default, tablePattern : airports, columnName : null'
|
CLOSED
|
|
jonathan
|
|
82681c0c-59dc-4ddb-a8bf-459d962d6b81
|
2025/06/13 23:21:12
|
2025/06/13 23:21:12
|
2025/06/13 23:21:12
|
20 ms
|
327 ms
|
SHOW TABLES IN `test`
|
CLOSED
|
== Parsed Logical Plan ==
'ShowTables [namespace#3440, tableName#3441, isTemporary#3442]
+- 'UnresolvedNamespace [test]
== Analyzed Logical Plan ==
namespace: string, tableName: string, isTemporary: boolean
ShowTables [namespace#3440, tableName#3441, isTemporary#3442]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [test]
== Optimized Logical Plan ==
CommandResult [namespace#3440, tableName#3441, isTemporary#3442], ShowTables [namespace#3440, tableName#3441, isTemporary#3442], V2SessionCatalog(spark_catalog), [test]
+- ShowTables [namespace#3440, tableName#3441, isTemporary#3442]
+- ResolvedNamespace V2SessionCatalog(spark_catalog), [test]
== Physical Plan ==
CommandResult <empty>, [namespace#3440, tableName#3441, isTemporary#3442]
+- ShowTables [namespace#3440, tableName#3441, isTemporary#3442], V2SessionCatalog(spark_catalog), [test]
|
jonathan
|
|
82ece942-c4e6-406e-9366-aed2ae67c729
|
2025/06/13 22:39:04
|
2025/06/13 22:39:04
|
2025/06/13 22:39:05
|
12 ms
|
770 ms
|
Listing catalogs
|
CLOSED
|
|
jonathon
|
|
8418ef72-3034-46eb-9919-57a3a94cb1a3
|
2025/06/13 07:16:51
|
2025/06/13 07:16:51
|
2025/06/13 07:16:51
|
251 ms
|
346 ms
|
Listing tables 'catalog : null, schemaPattern : %, tableTypes : null, tableName : %'
|
CLOSED
|
|
jonathan
|
|
844a111b-04dd-49a5-ba46-598dd022910a
|
2025/06/13 23:27:01
|
2025/06/13 23:27:01
|
2025/06/13 23:27:02
|
15 ms
|
713 ms
|
Listing catalogs
|
CLOSED
|
|
jonathon
|
|
8475f456-16b1-4470-9ca7-b4841d9b2d3e
|
2025/06/13 23:38:36
|
2025/06/13 23:38:36
|
2025/06/13 23:38:36
|
80 ms
|
177 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#4242, data_type#4243, comment#4244]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4242, data_type#4243, comment#4244]
== Optimized Logical Plan ==
CommandResult [col_name#4242, data_type#4243, comment#4244], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4242, data_type#4243, comment#4244]
== Physical Plan ==
CommandResult [col_name#4242, data_type#4243, comment#4244]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4242, data_type#4243, comment#4244]
|
jonathon
|
[35]
|
8941a51a-eff4-4b55-b556-60fcfdad821f
|
2025/06/13 22:18:18
|
2025/06/13 22:18:19
|
2025/06/13 22:18:19
|
267 ms
|
366 ms
|
SELECT C_3 AS C_18, C_0 AS C_12, C_4 AS C_19, C_4331 AS C_14, C_4332 AS C_17, C_4333 AS C_22, C_7 AS C_23, C_8 AS C_15, C_9 AS C_20, C_10 AS C_16, C_43 AS C_25, C_2 AS C_13, C_11 AS C_24, C_1 AS C_21 FROM (SELECT C_64656661756c745f616972706f727473.`id` AS C_3, C_64656661756c745f616972706f727473.`type` AS C_0, C_64656661756c745f616972706f727473.`name` AS C_4, C_64656661756c745f616972706f727473.`lat` AS C_5, C_64656661756c745f616972706f727473.`lon` AS C_6, C_64656661756c745f616972706f727473.`elev` AS C_1, C_64656661756c745f616972706f727473.`continent` AS C_7, C_64656661756c745f616972706f727473.`country` AS C_8, C_64656661756c745f616972706f727473.`region` AS C_9, C_64656661756c745f616972706f727473.`city` AS C_10, C_64656661756c745f616972706f727473.`iata` AS C_43, C_64656661756c745f616972706f727473.`code` AS C_2, C_64656661756c745f616972706f727473.`gps` AS C_11, (round((C_64656661756c745f616972706f727473.`lat` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4331, (round((C_64656661756c745f616972706f727473.`lon` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4332, (round((C_64656661756c745f616972706f727473.`elev` * power(1.000000000000000E+001, 3)), 0) / power(1.000000000000000E+001, 3)) AS C_4333 FROM `default`.`airports` C_64656661756c745f616972706f727473 WHERE ((C_64656661756c745f616972706f727473.`lon` <= (- 1.040500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lon` >= (- 1.110500000000000E+002)) AND (C_64656661756c745f616972706f727473.`lat` >= 4.100000000000000E+001) AND (C_64656661756c745f616972706f727473.`lat` <= 4.500000000000000E+001)) ) C_4954424c ORDER BY C_21 DESC LIMIT 5
|
CLOSED
|
== Parsed Logical Plan ==
'GlobalLimit 5
+- 'LocalLimit 5
+- 'Sort ['C_21 DESC NULLS LAST], true
+- 'Project ['C_3 AS C_18#2516, 'C_0 AS C_12#2517, 'C_4 AS C_19#2518, 'C_4331 AS C_14#2519, 'C_4332 AS C_17#2520, 'C_4333 AS C_22#2521, 'C_7 AS C_23#2522, 'C_8 AS C_15#2523, 'C_9 AS C_20#2524, 'C_10 AS C_16#2525, 'C_43 AS C_25#2526, 'C_2 AS C_13#2527, 'C_11 AS C_24#2528, 'C_1 AS C_21#2529]
+- 'SubqueryAlias C_4954424c
+- 'Project ['C_64656661756c745f616972706f727473.id AS C_3#2500, 'C_64656661756c745f616972706f727473.type AS C_0#2501, 'C_64656661756c745f616972706f727473.name AS C_4#2502, 'C_64656661756c745f616972706f727473.lat AS C_5#2503, 'C_64656661756c745f616972706f727473.lon AS C_6#2504, 'C_64656661756c745f616972706f727473.elev AS C_1#2505, 'C_64656661756c745f616972706f727473.continent AS C_7#2506, 'C_64656661756c745f616972706f727473.country AS C_8#2507, 'C_64656661756c745f616972706f727473.region AS C_9#2508, 'C_64656661756c745f616972706f727473.city AS C_10#2509, 'C_64656661756c745f616972706f727473.iata AS C_43#2510, 'C_64656661756c745f616972706f727473.code AS C_2#2511, 'C_64656661756c745f616972706f727473.gps AS C_11#2512, ('round(('C_64656661756c745f616972706f727473.lat * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4331#2513, ('round(('C_64656661756c745f616972706f727473.lon * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4332#2514, ('round(('C_64656661756c745f616972706f727473.elev * 'power(10.0, 3)), 0) / 'power(10.0, 3)) AS C_4333#2515]
+- 'Filter ((('C_64656661756c745f616972706f727473.lon <= -104.05) AND ('C_64656661756c745f616972706f727473.lon >= -111.05)) AND (('C_64656661756c745f616972706f727473.lat >= 41.0) AND ('C_64656661756c745f616972706f727473.lat <= 45.0)))
+- 'SubqueryAlias C_64656661756c745f616972706f727473
+- 'UnresolvedRelation [default, airports], [], false
== Analyzed Logical Plan ==
C_18: string, C_12: string, C_19: string, C_14: double, C_17: double, C_22: double, C_23: string, C_15: string, C_20: string, C_16: string, C_25: string, C_13: string, C_24: string, C_21: double
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_21#2529 DESC NULLS LAST], true
+- Project [C_3#2500 AS C_18#2516, C_0#2501 AS C_12#2517, C_4#2502 AS C_19#2518, C_4331#2513 AS C_14#2519, C_4332#2514 AS C_17#2520, C_4333#2515 AS C_22#2521, C_7#2506 AS C_23#2522, C_8#2507 AS C_15#2523, C_9#2508 AS C_20#2524, C_10#2509 AS C_16#2525, C_43#2510 AS C_25#2526, C_2#2511 AS C_13#2527, C_11#2512 AS C_24#2528, C_1#2505 AS C_21#2529]
+- SubqueryAlias C_4954424c
+- Project [id#2530 AS C_3#2500, type#2531 AS C_0#2501, name#2532 AS C_4#2502, lat#2533 AS C_5#2503, lon#2534 AS C_6#2504, elev#2535 AS C_1#2505, continent#2536 AS C_7#2506, country#2537 AS C_8#2507, region#2538 AS C_9#2508, city#2539 AS C_10#2509, iata#2540 AS C_43#2510, code#2541 AS C_2#2511, gps#2542 AS C_11#2512, (round((lat#2533 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4331#2513, (round((lon#2534 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4332#2514, (round((elev#2535 * POWER(10.0, cast(3 as double))), 0) / POWER(10.0, cast(3 as double))) AS C_4333#2515]
+- Filter (((lon#2534 <= -104.05) AND (lon#2534 >= -111.05)) AND ((lat#2533 >= 41.0) AND (lat#2533 <= 45.0)))
+- SubqueryAlias C_64656661756c745f616972706f727473
+- SubqueryAlias spark_catalog.default.airports
+- Relation spark_catalog.default.airports[id#2530,type#2531,name#2532,lat#2533,lon#2534,elev#2535,continent#2536,country#2537,region#2538,city#2539,iata#2540,code#2541,gps#2542] parquet
== Optimized Logical Plan ==
GlobalLimit 5
+- LocalLimit 5
+- Sort [C_21#2529 DESC NULLS LAST], true
+- Project [id#2530 AS C_18#2516, type#2531 AS C_12#2517, name#2532 AS C_19#2518, (round((lat#2533 * 1000.0), 0) / 1000.0) AS C_14#2519, (round((lon#2534 * 1000.0), 0) / 1000.0) AS C_17#2520, (round((elev#2535 * 1000.0), 0) / 1000.0) AS C_22#2521, continent#2536 AS C_23#2522, country#2537 AS C_15#2523, region#2538 AS C_20#2524, city#2539 AS C_16#2525, iata#2540 AS C_25#2526, code#2541 AS C_13#2527, gps#2542 AS C_24#2528, elev#2535 AS C_21#2529]
+- Filter ((isnotnull(lon#2534) AND isnotnull(lat#2533)) AND (((lon#2534 <= -104.05) AND (lon#2534 >= -111.05)) AND ((lat#2533 >= 41.0) AND (lat#2533 <= 45.0))))
+- Relation spark_catalog.default.airports[id#2530,type#2531,name#2532,lat#2533,lon#2534,elev#2535,continent#2536,country#2537,region#2538,city#2539,iata#2540,code#2541,gps#2542] parquet
== Physical Plan ==
TakeOrderedAndProject(limit=5, orderBy=[C_21#2529 DESC NULLS LAST], output=[C_18#2516,C_12#2517,C_19#2518,C_14#2519,C_17#2520,C_22#2521,C_23#2522,C_15#2523,C_20#2524,C_16#2525,C_25#2526,C_13#2527,C_24#2528,C_21#2529])
+- *(1) Project [id#2530 AS C_18#2516, type#2531 AS C_12#2517, name#2532 AS C_19#2518, (round((lat#2533 * 1000.0), 0) / 1000.0) AS C_14#2519, (round((lon#2534 * 1000.0), 0) / 1000.0) AS C_17#2520, (round((elev#2535 * 1000.0), 0) / 1000.0) AS C_22#2521, continent#2536 AS C_23#2522, country#2537 AS C_15#2523, region#2538 AS C_20#2524, city#2539 AS C_16#2525, iata#2540 AS C_25#2526, code#2541 AS C_13#2527, gps#2542 AS C_24#2528, elev#2535 AS C_21#2529]
+- *(1) Filter (((((isnotnull(lon#2534) AND isnotnull(lat#2533)) AND (lon#2534 <= -104.05)) AND (lon#2534 >= -111.05)) AND (lat#2533 >= 41.0)) AND (lat#2533 <= 45.0))
+- *(1) ColumnarToRow
+- FileScan parquet spark_catalog.default.airports[id#2530,type#2531,name#2532,lat#2533,lon#2534,elev#2535,continent#2536,country#2537,region#2538,city#2539,iata#2540,code#2541,gps#2542] Batched: true, DataFilters: [isnotnull(lon#2534), isnotnull(lat#2533), (lon#2534 <= -104.05), (lon#2534 >= -111.05), (lat#253..., Format: Parquet, Location: InMemoryFileIndex(1 paths)[file:/home/acdcadmin/spark-warehouse/airports], PartitionFilters: [], PushedFilters: [IsNotNull(lon), IsNotNull(lat), LessThanOrEqual(lon,-104.05), GreaterThanOrEqual(lon,-111.05), G..., ReadSchema: struct<id:string,type:string,name:string,lat:double,lon:double,elev:double,continent:string,count...
|
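(The physical plans for these bounding-box queries all share one shape: a parquet FileScan with the lon/lat ranges as PushedFilters, plus the implied IsNotNull checks, feeding TakeOrderedAndProject for the ORDER BY ... LIMIT 5. A stripped-down way to reproduce the same plan, minus the alias obfuscation:

EXPLAIN
SELECT id, name, elev FROM default.airports
WHERE lon <= -104.05 AND lon >= -111.05 AND lat >= 41.0 AND lat <= 45.0
ORDER BY elev DESC LIMIT 5;
)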
jonathan
|
[39]
|
8999cf5e-c88b-4980-aa38-13e2c37d95c1
|
2025/06/13 23:21:33
|
2025/06/13 23:21:33
|
2025/06/13 23:21:33
|
130 ms
|
539 ms
|
select `STRING`,
`DOUBLE`,
`INTEGER`,
`BIGINT`,
`FLOAT`,
`DECIMAL`,
`NUMBER`,
`BOOLEAN`,
`DATE`,
`TIMESTAMP`,
`DATETIME`,
`BINARY`,
`ARRAY`,
`MAP`,
`STRUCT`,
`VARCHAR`,
`CHAR`
from `default`.`alltypes`
limit 1000
|
CLOSED
|
== Parsed Logical Plan ==
'GlobalLimit 1000
+- 'LocalLimit 1000
+- 'Project ['STRING, 'DOUBLE, 'INTEGER, 'BIGINT, 'FLOAT, 'DECIMAL, 'NUMBER, 'BOOLEAN, 'DATE, 'TIMESTAMP, 'DATETIME, 'BINARY, 'ARRAY, 'MAP, 'STRUCT, 'VARCHAR, 'CHAR]
+- 'UnresolvedRelation [default, alltypes], [], false
== Analyzed Logical Plan ==
STRING: string, DOUBLE: double, INTEGER: int, BIGINT: bigint, FLOAT: float, DECIMAL: decimal(10,2), NUMBER: decimal(10,2), BOOLEAN: boolean, DATE: date, TIMESTAMP: timestamp, DATETIME: timestamp, BINARY: binary, ARRAY: array<int>, MAP: map<string,string>, STRUCT: struct<field1:string,field2:int>, VARCHAR: string, CHAR: string
GlobalLimit 1000
+- LocalLimit 1000
+- Project [STRING#3514, DOUBLE#3515, INTEGER#3516, BIGINT#3517L, FLOAT#3518, DECIMAL#3519, NUMBER#3520, BOOLEAN#3521, DATE#3522, TIMESTAMP#3523, DATETIME#3524, BINARY#3525, ARRAY#3526, MAP#3527, STRUCT#3528, VARCHAR#3529, CHAR#3530]
+- SubqueryAlias spark_catalog.default.alltypes
+- HiveTableRelation [`spark_catalog`.`default`.`alltypes`, org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, Data Cols: [STRING#3514, DOUBLE#3515, INTEGER#3516, BIGINT#3517L, FLOAT#3518, DECIMAL#3519, NUMBER#3520, BOO..., Partition Cols: []]
== Optimized Logical Plan ==
GlobalLimit 1000
+- LocalLimit 1000
+- HiveTableRelation [`spark_catalog`.`default`.`alltypes`, org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, Data Cols: [STRING#3514, DOUBLE#3515, INTEGER#3516, BIGINT#3517L, FLOAT#3518, DECIMAL#3519, NUMBER#3520, BOO..., Partition Cols: []]
== Physical Plan ==
CollectLimit 1000
+- Scan hive spark_catalog.default.alltypes [STRING#3514, DOUBLE#3515, INTEGER#3516, BIGINT#3517L, FLOAT#3518, DECIMAL#3519, NUMBER#3520, BOOLEAN#3521, DATE#3522, TIMESTAMP#3523, DATETIME#3524, BINARY#3525, ARRAY#3526, MAP#3527, STRUCT#3528, VARCHAR#3529, CHAR#3530], HiveTableRelation [`spark_catalog`.`default`.`alltypes`, org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, Data Cols: [STRING#3514, DOUBLE#3515, INTEGER#3516, BIGINT#3517L, FLOAT#3518, DECIMAL#3519, NUMBER#3520, BOO..., Partition Cols: []]
|
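(Unlike airports, which every plan above reads as a parquet Relation with filter pushdown, alltypes resolves to a HiveTableRelation backed by LazySimpleSerDe, so the scan shows up as "Scan hive" and the LIMIT stays a plain CollectLimit with nothing pushed down. The storage difference can be confirmed directly:

-- provider / SerDe details appear under "# Detailed Table Information":
DESCRIBE EXTENDED default.alltypes;
DESCRIBE EXTENDED default.airports;
)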
jonathan
|
|
89b27579-c655-44ef-bf92-6c4bd6c1d8d5
|
2025/06/13 19:06:30
|
2025/06/13 19:06:30
|
2025/06/13 19:06:30
|
86 ms
|
355 ms
|
DESCRIBE TABLE `default`.`alltypes`
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#2402, data_type#2403, comment#2404]
+- 'UnresolvedTableOrView [default, alltypes], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#2402, data_type#2403, comment#2404]
== Optimized Logical Plan ==
CommandResult [col_name#2402, data_type#2403, comment#2404], Execute DescribeTableCommand, [[STRING,string,null], [DOUBLE,double,null], [INTEGER,int,null], [BIGINT,bigint,null], [FLOAT,float,null], [DECIMAL,decimal(10,2),null], [NUMBER,decimal(10,2),null], [BOOLEAN,boolean,null], [DATE,date,null], [TIMESTAMP,timestamp,null], [DATETIME,timestamp,null], [BINARY,binary,null], [ARRAY,array<int>,null], [MAP,map<string,string>,null], [STRUCT,struct<field1:string,field2:int>,null], [VARCHAR,string,null], [CHAR,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#2402, data_type#2403, comment#2404]
== Physical Plan ==
CommandResult [col_name#2402, data_type#2403, comment#2404]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`alltypes`, false, [col_name#2402, data_type#2403, comment#2404]
|
jonathon
|
|
8bbb35ce-97f0-4a70-b383-3d91b7af96a3
|
2025/06/14 01:46:19
|
2025/06/14 01:46:19
|
2025/06/14 01:46:19
|
94 ms
|
191 ms
|
DESCRIBE default.airports
|
CLOSED
|
== Parsed Logical Plan ==
'DescribeRelation false, [col_name#4584, data_type#4585, comment#4586]
+- 'UnresolvedTableOrView [default, airports], DESCRIBE TABLE, true
== Analyzed Logical Plan ==
col_name: string, data_type: string, comment: string
DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4584, data_type#4585, comment#4586]
== Optimized Logical Plan ==
CommandResult [col_name#4584, data_type#4585, comment#4586], Execute DescribeTableCommand, [[id,string,null], [type,string,null], [name,string,null], [lat,double,null], [lon,double,null], [elev,double,null], [continent,string,null], [country,string,null], [region,string,null], [city,string,null], [iata,string,null], [code,string,null], [gps,string,null]]
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4584, data_type#4585, comment#4586]
== Physical Plan ==
CommandResult [col_name#4584, data_type#4585, comment#4586]
+- Execute DescribeTableCommand
+- DescribeTableCommand `spark_catalog`.`default`.`airports`, false, [col_name#4584, data_type#4585, comment#4586]
|