# Test HDFS scan node.
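# Expected folding, per the analyzed query below: 5 + 5 -> 10, 2 + 2 -> 4,
# coalesce(2, 3, 4) * 10 -> 20, and 0 * 1 -> 0.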
select 1
from tpch_nested_parquet.customer c, c.c_orders o, o.o_lineitems
where 5 + 5 < c_custkey and o_orderkey = (2 + 2)
and (coalesce(2, 3, 4) * 10) + l_linenumber < (0 * 1)
---- PLAN
Analyzed query: SELECT CAST(1 AS TINYINT) FROM tpch_nested_parquet.customer c,
c.c_orders o, o.o_lineitems WHERE c_custkey > CAST(10 AS BIGINT) AND o_orderkey
= CAST(4 AS BIGINT) AND CAST(20 AS BIGINT) + CAST(l_linenumber AS BIGINT) <
CAST(0 AS BIGINT)
F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=264.00MB mem-reservation=16.00MB thread-reservation=2
PLAN-ROOT SINK
| output exprs: CAST(1 AS TINYINT)
| mem-estimate=0B mem-reservation=0B thread-reservation=0
|
01:SUBPLAN
| mem-estimate=0B mem-reservation=0B thread-reservation=0
| tuple-ids=2,1,0 row-size=44B cardinality=1.50M
| in pipelines: 00(GETNEXT)
|
|--08:NESTED LOOP JOIN [CROSS JOIN]
| | mem-estimate=20B mem-reservation=0B thread-reservation=0
| | tuple-ids=2,1,0 row-size=44B cardinality=100
| | in pipelines: 00(GETNEXT)
| |
| |--02:SINGULAR ROW SRC
| | parent-subplan=01
| | mem-estimate=0B mem-reservation=0B thread-reservation=0
| | tuple-ids=0 row-size=20B cardinality=1
| | in pipelines: 00(GETNEXT)
| |
| 04:SUBPLAN
| | mem-estimate=0B mem-reservation=0B thread-reservation=0
| | tuple-ids=2,1 row-size=24B cardinality=100
| | in pipelines: 00(GETNEXT)
| |
| |--07:NESTED LOOP JOIN [CROSS JOIN]
| | | mem-estimate=20B mem-reservation=0B thread-reservation=0
| | | tuple-ids=2,1 row-size=24B cardinality=10
| | | in pipelines: 00(GETNEXT)
| | |
| | |--05:SINGULAR ROW SRC
| | | parent-subplan=04
| | | mem-estimate=0B mem-reservation=0B thread-reservation=0
| | | tuple-ids=1 row-size=20B cardinality=1
| | | in pipelines: 00(GETNEXT)
| | |
| | 06:UNNEST [o.o_lineitems]
| | parent-subplan=04
| | mem-estimate=0B mem-reservation=0B thread-reservation=0
| | tuple-ids=2 row-size=0B cardinality=10
| | in pipelines: 00(GETNEXT)
| |
| 03:UNNEST [c.c_orders o]
| parent-subplan=01
| mem-estimate=0B mem-reservation=0B thread-reservation=0
| tuple-ids=1 row-size=0B cardinality=10
| in pipelines: 00(GETNEXT)
|
00:SCAN HDFS [tpch_nested_parquet.customer c]
HDFS partitions=1/1 files=4 size=288.99MB
predicates: c_custkey > CAST(10 AS BIGINT), !empty(c.c_orders)
predicates on o: !empty(o.o_lineitems), o_orderkey = CAST(4 AS BIGINT)
predicates on o_lineitems: CAST(20 AS BIGINT) + CAST(l_linenumber AS BIGINT) < CAST(0 AS BIGINT)
stored statistics:
table: rows=150.00K size=288.99MB
columns missing stats: c_orders
extrapolated-rows=disabled max-scan-range-rows=50.12K
parquet statistics predicates: c_custkey > CAST(10 AS BIGINT)
parquet statistics predicates on o: o_orderkey = CAST(4 AS BIGINT)
parquet dictionary predicates: c_custkey > CAST(10 AS BIGINT)
parquet dictionary predicates on o: o_orderkey = CAST(4 AS BIGINT)
parquet dictionary predicates on o_lineitems: CAST(20 AS BIGINT) + CAST(l_linenumber AS BIGINT) < CAST(0 AS BIGINT)
mem-estimate=264.00MB mem-reservation=16.00MB thread-reservation=1
tuple-ids=0 row-size=20B cardinality=15.00K
in pipelines: 00(GETNEXT)
====
# Test HBase scan node.
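# Expected folding, per the analyzed query below: cast(4 as string) -> '4',
# 2 + 3 -> 5, concat('1', '0') -> '10', and upper('20') -> '20', so the
# BETWEEN range becomes id >= '10' AND id <= '20'.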
select * from functional_hbase.stringids
where string_col = cast(4 as string) and 2 + 3 = tinyint_col
and id between concat('1', '0') and upper('20')
---- PLAN
Analyzed query: SELECT * FROM functional_hbase.stringids WHERE string_col = '4'
AND tinyint_col = CAST(5 AS TINYINT) AND id >= '10' AND id <= '20'
F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=4.00KB mem-reservation=0B thread-reservation=1
PLAN-ROOT SINK
| output exprs: functional_hbase.stringids.id, functional_hbase.stringids.bigint_col, functional_hbase.stringids.bool_col, functional_hbase.stringids.date_string_col, functional_hbase.stringids.day, functional_hbase.stringids.double_col, functional_hbase.stringids.float_col, functional_hbase.stringids.int_col, functional_hbase.stringids.month, functional_hbase.stringids.smallint_col, functional_hbase.stringids.string_col, functional_hbase.stringids.timestamp_col, functional_hbase.stringids.tinyint_col, functional_hbase.stringids.year
| mem-estimate=0B mem-reservation=0B thread-reservation=0
|
00:SCAN HBASE [functional_hbase.stringids]
key predicates: id >= '10', id <= '20'
start key: 10
stop key: 20\0
hbase filters: d:string_col EQUAL '4'
predicates: tinyint_col = CAST(5 AS TINYINT), string_col = '4'
stored statistics:
table: rows=10.00K
columns: all
mem-estimate=4.00KB mem-reservation=0B thread-reservation=0
tuple-ids=0 row-size=107B cardinality=1
in pipelines: 00(GETNEXT)
====
# Test datasource scan node.
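# Expected folding, per the analyzed query below: pow(2, 8) -> 256 and
# 1 + 1 -> 2. The folded comparisons are pushed to the data source while
# float_col != 0 stays behind as an Impala-evaluated predicate.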
select * from functional.alltypes_datasource
where tinyint_col < (pow(2, 8)) and float_col != 0 and 1 + 1 > int_col
---- PLAN
Analyzed query: SELECT * FROM functional.alltypes_datasource WHERE
CAST(tinyint_col AS DOUBLE) < CAST(256 AS DOUBLE) AND float_col != CAST(0 AS
FLOAT) AND int_col < CAST(2 AS INT)
F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=1.00GB mem-reservation=0B thread-reservation=1
PLAN-ROOT SINK
| output exprs: functional.alltypes_datasource.id, functional.alltypes_datasource.bool_col, functional.alltypes_datasource.tinyint_col, functional.alltypes_datasource.smallint_col, functional.alltypes_datasource.int_col, functional.alltypes_datasource.bigint_col, functional.alltypes_datasource.float_col, functional.alltypes_datasource.double_col, functional.alltypes_datasource.timestamp_col, functional.alltypes_datasource.string_col, functional.alltypes_datasource.dec_col1, functional.alltypes_datasource.dec_col2, functional.alltypes_datasource.dec_col3, functional.alltypes_datasource.dec_col4, functional.alltypes_datasource.dec_col5, functional.alltypes_datasource.date_col
| mem-estimate=0B mem-reservation=0B thread-reservation=0
|
00:SCAN DATA SOURCE [functional.alltypes_datasource]
data source predicates: CAST(tinyint_col AS DOUBLE) < CAST(256 AS DOUBLE), int_col < CAST(2 AS INT)
predicates: float_col != CAST(0 AS FLOAT)
mem-estimate=1.00GB mem-reservation=0B thread-reservation=0
tuple-ids=0 row-size=116B cardinality=500
in pipelines: 00(GETNEXT)
====
# Test aggregation.
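# Expected folding, per the analyzed query below: 1 + 1 + id -> 2 + id, the
# timestamp literal plus INTERVAL 1 YEAR -> TIMESTAMP '2016-11-15 00:00:00',
# 1024 * 1024 -> 1048576, and the BETWEEN becomes >= 5 AND <= 10.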
select sum(1 + 1 + id)
from functional.alltypes
group by timestamp_col = cast('2015-11-15' as timestamp) + interval 1 year
having 1024 * 1024 * count(*) % 2 = 0
and (sum(1 + 1 + id) > 1 or sum(1 + 1 + id) > 1)
and (sum(1 + 1 + id) between 5 and 10)
---- PLAN
Analyzed query: SELECT sum(CAST(2 AS BIGINT) + CAST(id AS BIGINT)) FROM
functional.alltypes GROUP BY timestamp_col = TIMESTAMP '2016-11-15 00:00:00'
HAVING CAST(1048576 AS BIGINT) * count(*) % CAST(2 AS BIGINT) = CAST(0 AS
BIGINT) AND sum(CAST(2 AS BIGINT) + CAST(id AS BIGINT)) > CAST(1 AS BIGINT) AND
sum(CAST(2 AS BIGINT) + CAST(id AS BIGINT)) >= CAST(5 AS BIGINT) AND sum(CAST(2
AS BIGINT) + CAST(id AS BIGINT)) <= CAST(10 AS BIGINT)
F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=138.00MB mem-reservation=1.97MB thread-reservation=2
PLAN-ROOT SINK
| output exprs: sum(2 + id)
| mem-estimate=0B mem-reservation=0B thread-reservation=0
|
01:AGGREGATE [FINALIZE]
| output: sum(CAST(2 AS BIGINT) + CAST(id AS BIGINT)), count(*)
| group by: timestamp_col = TIMESTAMP '2016-11-15 00:00:00'
| having: sum(2 + id) <= CAST(10 AS BIGINT), sum(2 + id) > CAST(1 AS BIGINT), sum(2 + id) >= CAST(5 AS BIGINT), CAST(1048576 AS BIGINT) * count(*) % CAST(2 AS BIGINT) = CAST(0 AS BIGINT)
| mem-estimate=10.00MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
| tuple-ids=1 row-size=17B cardinality=0
| in pipelines: 01(GETNEXT), 00(OPEN)
|
00:SCAN HDFS [functional.alltypes]
HDFS partitions=24/24 files=24 size=478.45KB
stored statistics:
table: rows=7.30K size=478.45KB
partitions: 24/24 rows=7.30K
columns: all
extrapolated-rows=disabled max-scan-range-rows=310
mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
tuple-ids=0 row-size=20B cardinality=7.30K
in pipelines: 00(GETNEXT)
====
# Test hash join.
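# Expected folding, per the analyzed query below: 1 + 1 -> 2, 0 + 0 + 0 -> 0,
# ascii('a') -> 97, and round(1.11 + 2.22 + 3.33 + 4.44, 1) -> 11.1.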
select 1 from functional.alltypes a
left outer join functional.alltypes b
on (1 + 1 + a.id = b.id - (1 + 1) and
a.int_col between 0 + 0 + 0 + b.bigint_col and b.bigint_col + ascii('a'))
where round(1.11 + 2.22 + 3.33 + 4.44, 1) < cast(b.double_col as decimal(3, 2))
---- PLAN
Analyzed query: SELECT CAST(1 AS TINYINT) FROM functional.alltypes a LEFT OUTER
JOIN functional.alltypes b ON (CAST(2 AS BIGINT) + CAST(a.id AS BIGINT) =
CAST(b.id AS BIGINT) - CAST(2 AS BIGINT) AND CAST(a.int_col AS BIGINT) >= CAST(0
AS BIGINT) + b.bigint_col AND CAST(a.int_col AS BIGINT) <= b.bigint_col +
CAST(97 AS BIGINT)) WHERE CAST(b.double_col AS DECIMAL(3,2)) > CAST(11.1 AS
DECIMAL(6,1))
F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=257.94MB mem-reservation=2.00MB thread-reservation=3
PLAN-ROOT SINK
| output exprs: CAST(1 AS TINYINT)
| mem-estimate=0B mem-reservation=0B thread-reservation=0
|
02:HASH JOIN [LEFT OUTER JOIN]
| hash predicates: 2 + a.id = b.id - 2
| fk/pk conjuncts: assumed fk/pk
| other join predicates: CAST(a.int_col AS BIGINT) <= b.bigint_col + CAST(97 AS BIGINT), CAST(a.int_col AS BIGINT) >= CAST(0 AS BIGINT) + b.bigint_col
| other predicates: CAST(b.double_col AS DECIMAL(3,2)) > CAST(11.1 AS DECIMAL(6,1))
| mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
| tuple-ids=0,1N row-size=28B cardinality=7.30K
| in pipelines: 00(GETNEXT), 01(OPEN)
|
|--01:SCAN HDFS [functional.alltypes b]
| HDFS partitions=24/24 files=24 size=478.45KB
| predicates: CAST(b.double_col AS DECIMAL(3,2)) > CAST(11.1 AS DECIMAL(6,1))
| stored statistics:
| table: rows=7.30K size=478.45KB
| partitions: 24/24 rows=7.30K
| columns: all
| extrapolated-rows=disabled max-scan-range-rows=310
| mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
| tuple-ids=1 row-size=20B cardinality=730
| in pipelines: 01(GETNEXT)
|
00:SCAN HDFS [functional.alltypes a]
HDFS partitions=24/24 files=24 size=478.45KB
stored statistics:
table: rows=7.30K size=478.45KB
partitions: 24/24 rows=7.30K
columns: all
extrapolated-rows=disabled max-scan-range-rows=310
mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
tuple-ids=0 row-size=8B cardinality=7.30K
in pipelines: 00(GETNEXT)
====
# Test nested-loop join. Same as above but with a disjunction in the On clause.
# The Where-clause predicate has the lhs/rhs flipped.
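# The same constants fold as above. With the OR in the On clause there is no
# equi-join conjunct left for a hash join, so the planner falls back to a
# nested-loop join, as the plan below shows.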
select 1 from functional.alltypes a
left outer join functional.alltypes b
on (1 + 1 + a.id = b.id - (1 + 1) or
a.int_col between 0 + 0 + 0 + b.bigint_col and b.bigint_col + ascii('a'))
where cast(b.double_col as decimal(3, 2)) > round(1.11 + 2.22 + 3.33 + 4.44, 1)
---- PLAN
Analyzed query: SELECT CAST(1 AS TINYINT) FROM functional.alltypes a LEFT OUTER
JOIN functional.alltypes b ON (CAST(2 AS BIGINT) + CAST(a.id AS BIGINT) =
CAST(b.id AS BIGINT) - CAST(2 AS BIGINT) OR CAST(a.int_col AS BIGINT) >= CAST(0
AS BIGINT) + b.bigint_col AND CAST(a.int_col AS BIGINT) <= b.bigint_col +
CAST(97 AS BIGINT)) WHERE CAST(b.double_col AS DECIMAL(3,2)) > CAST(11.1 AS
DECIMAL(6,1))
F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=256.01MB mem-reservation=64.00KB thread-reservation=3
PLAN-ROOT SINK
| output exprs: CAST(1 AS TINYINT)
| mem-estimate=0B mem-reservation=0B thread-reservation=0
|
02:NESTED LOOP JOIN [LEFT OUTER JOIN]
| join predicates: (CAST(2 AS BIGINT) + CAST(a.id AS BIGINT) = CAST(b.id AS BIGINT) - CAST(2 AS BIGINT) OR CAST(a.int_col AS BIGINT) >= CAST(0 AS BIGINT) + b.bigint_col AND CAST(a.int_col AS BIGINT) <= b.bigint_col + CAST(97 AS BIGINT))
| predicates: CAST(b.double_col AS DECIMAL(3,2)) > CAST(11.1 AS DECIMAL(6,1))
| mem-estimate=14.26KB mem-reservation=0B thread-reservation=0
| tuple-ids=0,1N row-size=28B cardinality=7.30K
| in pipelines: 00(GETNEXT), 01(OPEN)
|
|--01:SCAN HDFS [functional.alltypes b]
| HDFS partitions=24/24 files=24 size=478.45KB
| predicates: CAST(b.double_col AS DECIMAL(3,2)) > CAST(11.1 AS DECIMAL(6,1))
| stored statistics:
| table: rows=7.30K size=478.45KB
| partitions: 24/24 rows=7.30K
| columns: all
| extrapolated-rows=disabled max-scan-range-rows=310
| mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
| tuple-ids=1 row-size=20B cardinality=730
| in pipelines: 01(GETNEXT)
|
00:SCAN HDFS [functional.alltypes a]
HDFS partitions=24/24 files=24 size=478.45KB
stored statistics:
table: rows=7.30K size=478.45KB
partitions: 24/24 rows=7.30K
columns: all
extrapolated-rows=disabled max-scan-range-rows=310
mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
tuple-ids=0 row-size=8B cardinality=7.30K
in pipelines: 00(GETNEXT)
====
# Test distinct aggregation with grouping.
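# Expected folding as in the aggregation test above: 1 + 1 + id -> 2 + id and
# 1024 * 1024 -> 1048576. The folded exprs appear in both phases of the
# two-phase distinct aggregation plan below.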
select sum(distinct 1 + 1 + id)
from functional.alltypes
group by timestamp_col = cast('2015-11-15' as timestamp) + interval 1 year
having 1024 * 1024 * count(*) % 2 = 0
---- PLAN
Analyzed query: SELECT sum(DISTINCT CAST(2 AS BIGINT) + CAST(id AS BIGINT)) FROM
functional.alltypes GROUP BY timestamp_col = TIMESTAMP '2016-11-15 00:00:00'
HAVING CAST(1048576 AS BIGINT) * count(*) % CAST(2 AS BIGINT) = CAST(0 AS
BIGINT)
F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=138.00MB mem-reservation=3.88MB thread-reservation=2
PLAN-ROOT SINK
| output exprs: sum(2 + id)
| mem-estimate=0B mem-reservation=0B thread-reservation=0
|
02:AGGREGATE [FINALIZE]
| output: sum(2 + id), count:merge(*)
| group by: timestamp_col = TIMESTAMP '2016-11-15 00:00:00'
| having: CAST(1048576 AS BIGINT) * count(*) % CAST(2 AS BIGINT) = CAST(0 AS BIGINT)
| mem-estimate=10.00MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
| tuple-ids=2 row-size=17B cardinality=0
| in pipelines: 02(GETNEXT), 01(OPEN)
|
01:AGGREGATE
| output: count(*)
| group by: timestamp_col = TIMESTAMP '2016-11-15 00:00:00', CAST(2 AS BIGINT) + CAST(id AS BIGINT)
| mem-estimate=10.00MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
| tuple-ids=1 row-size=17B cardinality=7.30K
| in pipelines: 01(GETNEXT), 00(OPEN)
|
00:SCAN HDFS [functional.alltypes]
HDFS partitions=24/24 files=24 size=478.45KB
stored statistics:
table: rows=7.30K size=478.45KB
partitions: 24/24 rows=7.30K
columns: all
extrapolated-rows=disabled max-scan-range-rows=310
mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
tuple-ids=0 row-size=20B cardinality=7.30K
in pipelines: 00(GETNEXT)
====
# Test non-grouping distinct aggregation.
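# Expected folding as above; note that in the merge phase the HAVING clause is
# rewritten over zeroifnull(count(*)), as the plan below shows.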
select sum(distinct 1 + 1 + id)
from functional.alltypes
having 1024 * 1024 * count(*) % 2 = 0
---- PLAN
Analyzed query: SELECT sum(DISTINCT CAST(2 AS BIGINT) + CAST(id AS BIGINT)) FROM
functional.alltypes HAVING CAST(1048576 AS BIGINT) * count(*) % CAST(2 AS
BIGINT) = CAST(0 AS BIGINT)
F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=138.00MB mem-reservation=1.97MB thread-reservation=2
PLAN-ROOT SINK
| output exprs: sum(2 + id)
| mem-estimate=0B mem-reservation=0B thread-reservation=0
|
02:AGGREGATE [FINALIZE]
| output: sum(2 + id), count:merge(*)
| having: CAST(1048576 AS BIGINT) * zeroifnull(count(*)) % CAST(2 AS BIGINT) = CAST(0 AS BIGINT)
| mem-estimate=10.00MB mem-reservation=0B spill-buffer=2.00MB thread-reservation=0
| tuple-ids=2 row-size=16B cardinality=0
| in pipelines: 02(GETNEXT), 01(OPEN)
|
01:AGGREGATE
| output: count(*)
| group by: CAST(2 AS BIGINT) + CAST(id AS BIGINT)
| mem-estimate=10.00MB mem-reservation=1.94MB spill-buffer=64.00KB thread-reservation=0
| tuple-ids=1 row-size=16B cardinality=7.30K
| in pipelines: 01(GETNEXT), 00(OPEN)
|
00:SCAN HDFS [functional.alltypes]
HDFS partitions=24/24 files=24 size=478.45KB
stored statistics:
table: rows=7.30K size=478.45KB
partitions: 24/24 rows=7.30K
columns: all
extrapolated-rows=disabled max-scan-range-rows=310
mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
tuple-ids=0 row-size=4B cardinality=7.30K
in pipelines: 00(GETNEXT)
====
# Test analytic eval node.
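# Expected folding, per the plan below: concat(concat('a', 'b'), string_col)
# -> concat('ab', string_col) and greatest(greatest(10, 20), bigint_col) ->
# greatest(20, bigint_col). The analytic function's argument is folded to
# 2 + int_col - 0 in the plan even though the analyzed query prints it
# unfolded.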
select first_value(1 + 1 + int_col - (1 - 1)) over
(partition by concat(concat('a', 'b'), string_col)
order by greatest(greatest(10, 20), bigint_col))
from functional.alltypes
---- PLAN
Analyzed query: SELECT first_value(1 + 1 + int_col - (1 - 1)) OVER (PARTITION BY
concat(concat('a', 'b'), string_col) ORDER BY greatest(greatest(10, 20),
bigint_col) ASC) FROM functional.alltypes
F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=144.00MB mem-reservation=16.03MB thread-reservation=2
PLAN-ROOT SINK
| output exprs: first_value(2 + int_col - 0)
| mem-estimate=0B mem-reservation=0B thread-reservation=0
|
02:ANALYTIC
| functions: first_value(2 + int_col - 0)
| partition by: concat('ab', string_col)
| order by: greatest(20, bigint_col) ASC
| window: ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
| mem-estimate=4.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
| tuple-ids=3,2 row-size=53B cardinality=7.30K
| in pipelines: 01(GETNEXT)
|
01:SORT
| order by: concat('ab', string_col) ASC NULLS FIRST, greatest(20, bigint_col) ASC
| materialized: concat('ab', string_col), greatest(20, bigint_col)
| mem-estimate=12.00MB mem-reservation=12.00MB spill-buffer=2.00MB thread-reservation=0
| tuple-ids=3 row-size=45B cardinality=7.30K
| in pipelines: 01(GETNEXT), 00(OPEN)
|
00:SCAN HDFS [functional.alltypes]
HDFS partitions=24/24 files=24 size=478.45KB
stored statistics:
table: rows=7.30K size=478.45KB
partitions: 24/24 rows=7.30K
columns: all
extrapolated-rows=disabled max-scan-range-rows=310
mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
tuple-ids=0 row-size=25B cardinality=7.30K
in pipelines: 00(GETNEXT)
====
# Test sort node.
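# Expected folding, per the analyzed query below:
# abs(factorial(5) / power(2, 4)) = abs(120 / 16) = 7.5.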
select int_col from functional.alltypes
order by id * abs((factorial(5) / power(2, 4)))
---- PLAN
Analyzed query: SELECT int_col FROM functional.alltypes ORDER BY id * CAST(7.5
AS DOUBLE) ASC
F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=134.00MB mem-reservation=6.03MB thread-reservation=2
PLAN-ROOT SINK
| output exprs: int_col
| mem-estimate=0B mem-reservation=0B thread-reservation=0
|
01:SORT
| order by: id * 7.5 ASC
| mem-estimate=6.00MB mem-reservation=6.00MB spill-buffer=2.00MB thread-reservation=0
| tuple-ids=1 row-size=8B cardinality=7.30K
| in pipelines: 01(GETNEXT), 00(OPEN)
|
00:SCAN HDFS [functional.alltypes]
HDFS partitions=24/24 files=24 size=478.45KB
stored statistics:
table: rows=7.30K size=478.45KB
partitions: 24/24 rows=7.30K
columns: all
extrapolated-rows=disabled max-scan-range-rows=310
mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
tuple-ids=0 row-size=8B cardinality=7.30K
in pipelines: 00(GETNEXT)
====
# Test HDFS table sink.
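# Expected folding, per the analyzed query below: 1 + 1 + 1 -> 3 and
# 1 - 1 - 1 -> -1, so the partition keys become CAST(3 + year AS INT) and
# CAST(month - -1 AS INT).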
insert into functional.alltypes (id, int_col) partition(year,month)
select id, int_col, cast(1 + 1 + 1 + year as int), cast(month - (1 - 1 - 1) as int)
from functional.alltypessmall
---- PLAN
Analyzed query: SELECT id, int_col, CAST(CAST(3 AS BIGINT) + CAST(`year` AS
BIGINT) AS INT), CAST(CAST(`month` AS BIGINT) - CAST(-1 AS BIGINT) AS INT) FROM
functional.alltypessmall
F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=38.00MB mem-reservation=6.01MB thread-reservation=2
WRITE TO HDFS [functional.alltypes, OVERWRITE=false, PARTITION-KEYS=(CAST(3 + year AS INT),CAST(month - -1 AS INT))]
| partitions=4
| output exprs: id, NULL, NULL, NULL, int_col, NULL, NULL, NULL, NULL, NULL, NULL, CAST(CAST(3 AS BIGINT) + CAST(year AS BIGINT) AS INT), CAST(CAST(month AS BIGINT) - CAST(-1 AS BIGINT) AS INT)
| mem-estimate=1.56KB mem-reservation=0B thread-reservation=0
|
01:SORT
| order by: CAST(3 + year AS INT) ASC NULLS LAST, CAST(month - -1 AS INT) ASC NULLS LAST
| mem-estimate=6.00MB mem-reservation=6.00MB spill-buffer=2.00MB thread-reservation=0
| tuple-ids=1 row-size=16B cardinality=100
| in pipelines: 01(GETNEXT), 00(OPEN)
|
00:SCAN HDFS [functional.alltypessmall]
HDFS partitions=4/4 files=4 size=6.32KB
stored statistics:
table: rows=100 size=6.32KB
partitions: 4/4 rows=100
columns: all
extrapolated-rows=disabled max-scan-range-rows=25
mem-estimate=32.00MB mem-reservation=8.00KB thread-reservation=1
tuple-ids=0 row-size=16B cardinality=100
in pipelines: 00(GETNEXT)
====
# Constant folding does not work across query blocks.
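# Each inline view folds only its own literals: 10 + (20 + 30) is never
# combined into the literal 60 across the nested blocks, so the aggregate in
# the plan below still contains the nested expression
# CAST(10 AS INT) + CAST(CAST(20 AS SMALLINT) + CAST(30 AS SMALLINT) AS INT).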
select sum(id + c3) from
(select id, 10 + c2 as c3 from
(select id, 20 + c1 as c2 from
(select id, 30 as c1 from functional.alltypes limit 2) v1
) v2
) v3
---- PLAN
Analyzed query: SELECT sum(CAST(id AS BIGINT) + CAST(c3 AS BIGINT)) FROM (SELECT
id, CAST(10 AS INT) + CAST(c2 AS INT) c3 FROM (SELECT id, CAST(20 AS SMALLINT) +
CAST(c1 AS SMALLINT) c2 FROM (SELECT id, CAST(30 AS TINYINT) c1 FROM
functional.alltypes LIMIT CAST(2 AS TINYINT)) v1) v2) v3
F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
| Per-Host Resources: mem-estimate=138.00MB mem-reservation=32.00KB thread-reservation=2
PLAN-ROOT SINK
| output exprs: sum(id + c3)
| mem-estimate=0B mem-reservation=0B thread-reservation=0
|
01:AGGREGATE [FINALIZE]
| output: sum(CAST(id AS BIGINT) + CAST(CAST(10 AS INT) + CAST(CAST(20 AS SMALLINT) + CAST(30 AS SMALLINT) AS INT) AS BIGINT))
| mem-estimate=10.00MB mem-reservation=0B spill-buffer=2.00MB thread-reservation=0
| tuple-ids=4 row-size=8B cardinality=1
| in pipelines: 01(GETNEXT), 00(OPEN)
|
00:SCAN HDFS [functional.alltypes]
HDFS partitions=24/24 files=24 size=478.45KB
stored statistics:
table: rows=7.30K size=478.45KB
partitions: 24/24 rows=7.30K
columns: all
extrapolated-rows=disabled max-scan-range-rows=310
limit: 2
mem-estimate=128.00MB mem-reservation=32.00KB thread-reservation=1
tuple-ids=0 row-size=4B cardinality=2
in pipelines: 00(GETNEXT)
====