-- Automatically generated by SQLQueryTestSuite
-- !query
CREATE TEMPORARY VIEW tenk2 AS SELECT * FROM tenk1
-- !query analysis
CreateViewCommand `tenk2`, SELECT * FROM tenk1, false, false, LocalTempView, true
   +- Project [unique1#x, unique2#x, two#x, four#x, ten#x, twenty#x, hundred#x, thousand#x, twothousand#x, fivethous#x, tenthous#x, odd#x, even#x, stringu1#x, stringu2#x, string4#x]
      +- SubqueryAlias spark_catalog.default.tenk1
         +- Relation spark_catalog.default.tenk1[unique1#x,unique2#x,two#x,four#x,ten#x,twenty#x,hundred#x,thousand#x,twothousand#x,fivethous#x,tenthous#x,odd#x,even#x,stringu1#x,stringu2#x,string4#x] parquet
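-- tenk2 is a temporary view that simply mirrors tenk1; later queries (e.g. the
-- UNION ALL under a window function below) read the same data through both names.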
-- !query
CREATE TABLE empsalary (
    depname string,
    empno integer,
    salary int,
    enroll_date date
) USING parquet
-- !query analysis
CreateDataSourceTableCommand `spark_catalog`.`default`.`empsalary`, false
-- !query
INSERT INTO empsalary VALUES
('develop', 10, 5200, date '2007-08-01'),
('sales', 1, 5000, date '2006-10-01'),
('personnel', 5, 3500, date '2007-12-10'),
('sales', 4, 4800, date '2007-08-08'),
('personnel', 2, 3900, date '2006-12-23'),
('develop', 7, 4200, date '2008-01-01'),
('develop', 9, 4500, date '2008-01-01'),
('sales', 3, 4800, date '2007-08-01'),
('develop', 8, 6000, date '2006-10-01'),
('develop', 11, 5200, date '2007-08-15')
-- !query analysis
InsertIntoHadoopFsRelationCommand file:[not included in comparison]/{warehouse_dir}/empsalary, false, Parquet, [path=file:[not included in comparison]/{warehouse_dir}/empsalary], Append, `spark_catalog`.`default`.`empsalary`, org.apache.spark.sql.execution.datasources.InMemoryFileIndex(file:[not included in comparison]/{warehouse_dir}/empsalary), [depname, empno, salary, enroll_date]
+- Project [cast(col1#x as string) AS depname#x, cast(col2#x as int) AS empno#x, cast(col3#x as int) AS salary#x, cast(col4#x as date) AS enroll_date#x]
   +- LocalRelation [col1#x, col2#x, col3#x, col4#x]
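-- Illustrative sketch (not part of the generated output): a typical window
-- aggregate over this sample data, assuming the table above, would be
--   SELECT depname, empno, salary,
--          avg(salary) OVER (PARTITION BY depname) AS dept_avg
--   FROM empsalary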
-- !query
create table datetimes (
    id int,
    f_time timestamp,
    f_timetz timestamp,
    f_interval timestamp,
    f_timestamptz timestamp,
    f_timestamp timestamp
) using parquet
-- !query analysis
CreateDataSourceTableCommand `spark_catalog`.`default`.`datetimes`, false
-- !query
insert into datetimes values
(1, timestamp '11:00', cast ('11:00 BST' as timestamp), cast ('1 year' as timestamp), cast ('2000-10-19 10:23:54+01' as timestamp), timestamp '2000-10-19 10:23:54'),
(2, timestamp '12:00', cast ('12:00 BST' as timestamp), cast ('2 years' as timestamp), cast ('2001-10-19 10:23:54+01' as timestamp), timestamp '2001-10-19 10:23:54'),
(3, timestamp '13:00', cast ('13:00 BST' as timestamp), cast ('3 years' as timestamp), cast ('2001-10-19 10:23:54+01' as timestamp), timestamp '2001-10-19 10:23:54'),
(4, timestamp '14:00', cast ('14:00 BST' as timestamp), cast ('4 years' as timestamp), cast ('2002-10-19 10:23:54+01' as timestamp), timestamp '2002-10-19 10:23:54'),
(5, timestamp '15:00', cast ('15:00 BST' as timestamp), cast ('5 years' as timestamp), cast ('2003-10-19 10:23:54+01' as timestamp), timestamp '2003-10-19 10:23:54'),
(6, timestamp '15:00', cast ('15:00 BST' as timestamp), cast ('5 years' as timestamp), cast ('2004-10-19 10:23:54+01' as timestamp), timestamp '2004-10-19 10:23:54'),
(7, timestamp '17:00', cast ('17:00 BST' as timestamp), cast ('7 years' as timestamp), cast ('2005-10-19 10:23:54+01' as timestamp), timestamp '2005-10-19 10:23:54'),
(8, timestamp '18:00', cast ('18:00 BST' as timestamp), cast ('8 years' as timestamp), cast ('2006-10-19 10:23:54+01' as timestamp), timestamp '2006-10-19 10:23:54'),
(9, timestamp '19:00', cast ('19:00 BST' as timestamp), cast ('9 years' as timestamp), cast ('2007-10-19 10:23:54+01' as timestamp), timestamp '2007-10-19 10:23:54'),
(10, timestamp '20:00', cast ('20:00 BST' as timestamp), cast ('10 years' as timestamp), cast ('2008-10-19 10:23:54+01' as timestamp), timestamp '2008-10-19 10:23:54')
-- !query analysis
org.apache.spark.sql.AnalysisException
{
  "errorClass" : "INVALID_INLINE_TABLE.FAILED_SQL_EXPRESSION_EVALUATION",
  "sqlState" : "42000",
  "messageParameters" : {
    "sqlExpr" : "\"CAST(11:00 BST AS TIMESTAMP)\""
  },
  "queryContext" : [ {
    "objectType" : "",
    "objectName" : "",
    "startIndex" : 23,
    "stopIndex" : 1698,
    "fragment" : "values\n(1, timestamp '11:00', cast ('11:00 BST' as timestamp), cast ('1 year' as timestamp), cast ('2000-10-19 10:23:54+01' as timestamp), timestamp '2000-10-19 10:23:54'),\n(2, timestamp '12:00', cast ('12:00 BST' as timestamp), cast ('2 years' as timestamp), cast ('2001-10-19 10:23:54+01' as timestamp), timestamp '2001-10-19 10:23:54'),\n(3, timestamp '13:00', cast ('13:00 BST' as timestamp), cast ('3 years' as timestamp), cast ('2001-10-19 10:23:54+01' as timestamp), timestamp '2001-10-19 10:23:54'),\n(4, timestamp '14:00', cast ('14:00 BST' as timestamp), cast ('4 years' as timestamp), cast ('2002-10-19 10:23:54+01' as timestamp), timestamp '2002-10-19 10:23:54'),\n(5, timestamp '15:00', cast ('15:00 BST' as timestamp), cast ('5 years' as timestamp), cast ('2003-10-19 10:23:54+01' as timestamp), timestamp '2003-10-19 10:23:54'),\n(6, timestamp '15:00', cast ('15:00 BST' as timestamp), cast ('5 years' as timestamp), cast ('2004-10-19 10:23:54+01' as timestamp), timestamp '2004-10-19 10:23:54'),\n(7, timestamp '17:00', cast ('17:00 BST' as timestamp), cast ('7 years' as timestamp), cast ('2005-10-19 10:23:54+01' as timestamp), timestamp '2005-10-19 10:23:54'),\n(8, timestamp '18:00', cast ('18:00 BST' as timestamp), cast ('8 years' as timestamp), cast ('2006-10-19 10:23:54+01' as timestamp), timestamp '2006-10-19 10:23:54'),\n(9, timestamp '19:00', cast ('19:00 BST' as timestamp), cast ('9 years' as timestamp), cast ('2007-10-19 10:23:54+01' as timestamp), timestamp '2007-10-19 10:23:54'),\n(10, timestamp '20:00', cast ('20:00 BST' as timestamp), cast ('10 years' as timestamp), cast ('2008-10-19 10:23:54+01' as timestamp), timestamp '2008-10-19 10:23:54')"
  } ]
}
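-- The insert fails during analysis: strings such as '11:00 BST' and '1 year'
-- are not valid timestamps, so evaluating the casts in the inline table raises
-- INVALID_INLINE_TABLE.FAILED_SQL_EXPRESSION_EVALUATION and datetimes stays empty.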
-- !query
WITH cte (x) AS (
        SELECT * FROM range(1, 36, 2)
)
SELECT x, (sum(x) over w)
FROM cte
WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following)
-- !query analysis
WithCTE
:- CTERelationDef xxxx, false
:  +- SubqueryAlias cte
:     +- Project [id#xL AS x#xL]
:        +- Project [id#xL]
:           +- Range (1, 36, step=2)
+- Project [x#xL, sum(x) OVER (ORDER BY x ASC NULLS FIRST ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)#xL]
   +- Project [x#xL, sum(x) OVER (ORDER BY x ASC NULLS FIRST ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)#xL, sum(x) OVER (ORDER BY x ASC NULLS FIRST ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)#xL]
      +- Window [sum(x#xL) windowspecdefinition(x#xL ASC NULLS FIRST, specifiedwindowframe(RowFrame, -1, 1)) AS sum(x) OVER (ORDER BY x ASC NULLS FIRST ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)#xL], [x#xL ASC NULLS FIRST]
         +- Project [x#xL]
            +- SubqueryAlias cte
               +- CTERelationRef xxxx, true, [x#xL], false
-- !query
WITH cte (x) AS (
        SELECT * FROM range(1, 36, 2)
)
SELECT x, (sum(x) over w)
FROM cte
WINDOW w AS (ORDER BY x range between 1 preceding and 1 following)
-- !query analysis
WithCTE
:- CTERelationDef xxxx, false
:  +- SubqueryAlias cte
:     +- Project [id#xL AS x#xL]
:        +- Project [id#xL]
:           +- Range (1, 36, step=2)
+- Project [x#xL, sum(x) OVER (ORDER BY x ASC NULLS FIRST RANGE BETWEEN (- 1) FOLLOWING AND 1 FOLLOWING)#xL]
   +- Project [x#xL, sum(x) OVER (ORDER BY x ASC NULLS FIRST RANGE BETWEEN (- 1) FOLLOWING AND 1 FOLLOWING)#xL, sum(x) OVER (ORDER BY x ASC NULLS FIRST RANGE BETWEEN (- 1) FOLLOWING AND 1 FOLLOWING)#xL]
      +- Window [sum(x#xL) windowspecdefinition(x#xL ASC NULLS FIRST, specifiedwindowframe(RangeFrame, cast(-1 as bigint), cast(1 as bigint))) AS sum(x) OVER (ORDER BY x ASC NULLS FIRST RANGE BETWEEN (- 1) FOLLOWING AND 1 FOLLOWING)#xL], [x#xL ASC NULLS FIRST]
         +- Project [x#xL]
            +- SubqueryAlias cte
               +- CTERelationRef xxxx, true, [x#xL], false
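-- Note the pretty-printed frame above: the RANGE lower bound '1 preceding' is
-- normalized to '(- 1) FOLLOWING' in the generated alias, while the ROWS variant
-- keeps '1 PRECEDING'; both denote the same -1 offset.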
-- !query
WITH cte (x) AS (
        select 1 union all select 1 union all select 1 union all
        SELECT * FROM range(5, 50, 2)
)
SELECT x, (sum(x) over w)
FROM cte
WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following)
-- !query analysis
WithCTE
:- CTERelationDef xxxx, false
:  +- SubqueryAlias cte
:     +- Project [1#xL AS x#xL]
:        +- Union false, false
:           :- Project [cast(1#x as bigint) AS 1#xL]
:           :  +- Union false, false
:           :     :- Union false, false
:           :     :  :- Project [1 AS 1#x]
:           :     :  :  +- OneRowRelation
:           :     :  +- Project [1 AS 1#x]
:           :     :     +- OneRowRelation
:           :     +- Project [1 AS 1#x]
:           :        +- OneRowRelation
:           +- Project [id#xL]
:              +- Range (5, 50, step=2)
+- Project [x#xL, sum(x) OVER (ORDER BY x ASC NULLS FIRST ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)#xL]
   +- Project [x#xL, sum(x) OVER (ORDER BY x ASC NULLS FIRST ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)#xL, sum(x) OVER (ORDER BY x ASC NULLS FIRST ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)#xL]
      +- Window [sum(x#xL) windowspecdefinition(x#xL ASC NULLS FIRST, specifiedwindowframe(RowFrame, -1, 1)) AS sum(x) OVER (ORDER BY x ASC NULLS FIRST ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)#xL], [x#xL ASC NULLS FIRST]
         +- Project [x#xL]
            +- SubqueryAlias cte
               +- CTERelationRef xxxx, true, [x#xL], false
-- !query
WITH cte (x) AS (
        select 1 union all select 1 union all select 1 union all
        SELECT * FROM range(5, 50, 2)
)
SELECT x, (sum(x) over w)
FROM cte
WINDOW w AS (ORDER BY x range between 1 preceding and 1 following)
-- !query analysis
WithCTE
:- CTERelationDef xxxx, false
:  +- SubqueryAlias cte
:     +- Project [1#xL AS x#xL]
:        +- Union false, false
:           :- Project [cast(1#x as bigint) AS 1#xL]
:           :  +- Union false, false
:           :     :- Union false, false
:           :     :  :- Project [1 AS 1#x]
:           :     :  :  +- OneRowRelation
:           :     :  +- Project [1 AS 1#x]
:           :     :     +- OneRowRelation
:           :     +- Project [1 AS 1#x]
:           :        +- OneRowRelation
:           +- Project [id#xL]
:              +- Range (5, 50, step=2)
+- Project [x#xL, sum(x) OVER (ORDER BY x ASC NULLS FIRST RANGE BETWEEN (- 1) FOLLOWING AND 1 FOLLOWING)#xL]
   +- Project [x#xL, sum(x) OVER (ORDER BY x ASC NULLS FIRST RANGE BETWEEN (- 1) FOLLOWING AND 1 FOLLOWING)#xL, sum(x) OVER (ORDER BY x ASC NULLS FIRST RANGE BETWEEN (- 1) FOLLOWING AND 1 FOLLOWING)#xL]
      +- Window [sum(x#xL) windowspecdefinition(x#xL ASC NULLS FIRST, specifiedwindowframe(RangeFrame, cast(-1 as bigint), cast(1 as bigint))) AS sum(x) OVER (ORDER BY x ASC NULLS FIRST RANGE BETWEEN (- 1) FOLLOWING AND 1 FOLLOWING)#xL], [x#xL ASC NULLS FIRST]
         +- Project [x#xL]
            +- SubqueryAlias cte
               +- CTERelationRef xxxx, true, [x#xL], false
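-- Because range() yields bigint ids, the integer literals from the UNION ALL
-- branches are widened to bigint (the 'cast(1#x as bigint)' Project) before the
-- CTE output feeds the window aggregate.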
-- !query
SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk2)s LIMIT 0
-- !query analysis
GlobalLimit 0
+- LocalLimit 0
   +- Project [count(1) OVER (PARTITION BY four ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)#xL]
      +- Project [four#x, count(1) OVER (PARTITION BY four ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)#xL, count(1) OVER (PARTITION BY four ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)#xL]
         +- Window [count(1) windowspecdefinition(four#x, specifiedwindowframe(RowFrame, unboundedpreceding$(), unboundedfollowing$())) AS count(1) OVER (PARTITION BY four ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)#xL], [four#x]
            +- Project [four#x]
               +- SubqueryAlias s
                  +- Union false, false
                     :- Project [unique1#x, unique2#x, two#x, four#x, ten#x, twenty#x, hundred#x, thousand#x, twothousand#x, fivethous#x, tenthous#x, odd#x, even#x, stringu1#x, stringu2#x, string4#x]
                     :  +- SubqueryAlias spark_catalog.default.tenk1
                     :     +- Relation spark_catalog.default.tenk1[unique1#x,unique2#x,two#x,four#x,ten#x,twenty#x,hundred#x,thousand#x,twothousand#x,fivethous#x,tenthous#x,odd#x,even#x,stringu1#x,stringu2#x,string4#x] parquet
                     +- Project [unique1#x, unique2#x, two#x, four#x, ten#x, twenty#x, hundred#x, thousand#x, twothousand#x, fivethous#x, tenthous#x, odd#x, even#x, stringu1#x, stringu2#x, string4#x]
                        +- SubqueryAlias tenk2
                           +- View (`tenk2`, [unique1#x, unique2#x, two#x, four#x, ten#x, twenty#x, hundred#x, thousand#x, twothousand#x, fivethous#x, tenthous#x, odd#x, even#x, stringu1#x, stringu2#x, string4#x])
                              +- Project [cast(unique1#x as int) AS unique1#x, cast(unique2#x as int) AS unique2#x, cast(two#x as int) AS two#x, cast(four#x as int) AS four#x, cast(ten#x as int) AS ten#x, cast(twenty#x as int) AS twenty#x, cast(hundred#x as int) AS hundred#x, cast(thousand#x as int) AS thousand#x, cast(twothousand#x as int) AS twothousand#x, cast(fivethous#x as int) AS fivethous#x, cast(tenthous#x as int) AS tenthous#x, cast(odd#x as int) AS odd#x, cast(even#x as int) AS even#x, cast(stringu1#x as string) AS stringu1#x, cast(stringu2#x as string) AS stringu2#x, cast(string4#x as string) AS string4#x]
                                 +- Project [unique1#x, unique2#x, two#x, four#x, ten#x, twenty#x, hundred#x, thousand#x, twothousand#x, fivethous#x, tenthous#x, odd#x, even#x, stringu1#x, stringu2#x, string4#x]
                                    +- SubqueryAlias spark_catalog.default.tenk1
                                       +- Relation spark_catalog.default.tenk1[unique1#x,unique2#x,two#x,four#x,ten#x,twenty#x,hundred#x,thousand#x,twothousand#x,fivethous#x,tenthous#x,odd#x,even#x,stringu1#x,stringu2#x,string4#x] parquet
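-- LIMIT 0 does not short-circuit analysis: the full window plan is still built,
-- and the temporary view tenk2 is expanded inline as a View node re-casting
-- every tenk1 column.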
-- !query
create table t1 (f1 int, f2 int) using parquet
-- !query analysis
CreateDataSourceTableCommand `spark_catalog`.`default`.`t1`, false
-- !query
insert into t1 values (1,1),(1,2),(2,2)
-- !query analysis
InsertIntoHadoopFsRelationCommand file:[not included in comparison]/{warehouse_dir}/t1, false, Parquet, [path=file:[not included in comparison]/{warehouse_dir}/t1], Append, `spark_catalog`.`default`.`t1`, org.apache.spark.sql.execution.datasources.InMemoryFileIndex(file:[not included in comparison]/{warehouse_dir}/t1), [f1, f2]
+- Project [cast(col1#x as int) AS f1#x, cast(col2#x as int) AS f2#x]
   +- LocalRelation [col1#x, col2#x]
-- !query
select f1, sum(f1) over (partition by f1
                         range between 1 preceding and 1 following)
from t1 where f1 = f2
-- !query analysis
org.apache.spark.sql.catalyst.ExtendedAnalysisException
{
  "errorClass" : "DATATYPE_MISMATCH.RANGE_FRAME_WITHOUT_ORDER",
  "sqlState" : "42K09",
  "messageParameters" : {
    "sqlExpr" : "\"(PARTITION BY f1 RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING)\""
  },
  "queryContext" : [ {
    "objectType" : "",
    "objectName" : "",
    "startIndex" : 25,
    "stopIndex" : 108,
    "fragment" : "(partition by f1\n                         range between 1 preceding and 1 following)"
  } ]
}
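-- A RANGE frame with value offsets needs exactly one ORDER BY expression to
-- measure the offsets against; the queries below add 'order by f2' and
-- analyze cleanly.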
-- !query
select f1, sum(f1) over (partition by f1 order by f2
                         range between 1 preceding and 1 following)
from t1 where f1 = f2
-- !query analysis
Project [f1#x, sum(f1) OVER (PARTITION BY f1 ORDER BY f2 ASC NULLS FIRST RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING)#xL]
+- Project [f1#x, f2#x, sum(f1) OVER (PARTITION BY f1 ORDER BY f2 ASC NULLS FIRST RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING)#xL, sum(f1) OVER (PARTITION BY f1 ORDER BY f2 ASC NULLS FIRST RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING)#xL]
   +- Window [sum(f1#x) windowspecdefinition(f1#x, f2#x ASC NULLS FIRST, specifiedwindowframe(RangeFrame, -1, 1)) AS sum(f1) OVER (PARTITION BY f1 ORDER BY f2 ASC NULLS FIRST RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING)#xL], [f1#x], [f2#x ASC NULLS FIRST]
      +- Project [f1#x, f2#x]
         +- Filter (f1#x = f2#x)
            +- SubqueryAlias spark_catalog.default.t1
               +- Relation spark_catalog.default.t1[f1#x,f2#x] parquet
-- !query
select f1, sum(f1) over (partition by f1, f1 order by f2
                         range between 2 preceding and 1 preceding)
from t1 where f1 = f2
-- !query analysis
Project [f1#x, sum(f1) OVER (PARTITION BY f1, f1 ORDER BY f2 ASC NULLS FIRST RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING)#xL]
+- Project [f1#x, f2#x, sum(f1) OVER (PARTITION BY f1, f1 ORDER BY f2 ASC NULLS FIRST RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING)#xL, sum(f1) OVER (PARTITION BY f1, f1 ORDER BY f2 ASC NULLS FIRST RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING)#xL]
   +- Window [sum(f1#x) windowspecdefinition(f1#x, f1#x, f2#x ASC NULLS FIRST, specifiedwindowframe(RangeFrame, -2, -1)) AS sum(f1) OVER (PARTITION BY f1, f1 ORDER BY f2 ASC NULLS FIRST RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING)#xL], [f1#x, f1#x], [f2#x ASC NULLS FIRST]
      +- Project [f1#x, f2#x]
         +- Filter (f1#x = f2#x)
            +- SubqueryAlias spark_catalog.default.t1
               +- Relation spark_catalog.default.t1[f1#x,f2#x] parquet
-- !query
select f1, sum(f1) over (partition by f1, f2 order by f2
                         range between 1 following and 2 following)
from t1 where f1 = f2
-- !query analysis
Project [f1#x, sum(f1) OVER (PARTITION BY f1, f2 ORDER BY f2 ASC NULLS FIRST RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING)#xL]
+- Project [f1#x, f2#x, sum(f1) OVER (PARTITION BY f1, f2 ORDER BY f2 ASC NULLS FIRST RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING)#xL, sum(f1) OVER (PARTITION BY f1, f2 ORDER BY f2 ASC NULLS FIRST RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING)#xL]
   +- Window [sum(f1#x) windowspecdefinition(f1#x, f2#x, f2#x ASC NULLS FIRST, specifiedwindowframe(RangeFrame, 1, 2)) AS sum(f1) OVER (PARTITION BY f1, f2 ORDER BY f2 ASC NULLS FIRST RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING)#xL], [f1#x, f2#x], [f2#x ASC NULLS FIRST]
      +- Project [f1#x, f2#x]
         +- Filter (f1#x = f2#x)
            +- SubqueryAlias spark_catalog.default.t1
               +- Relation spark_catalog.default.t1[f1#x,f2#x] parquet
-- !query
SELECT rank() OVER (ORDER BY length('abc'))
-- !query analysis
Project [RANK() OVER (ORDER BY length(abc) ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)#x]
+- Project [RANK() OVER (ORDER BY length(abc) ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)#x, RANK() OVER (ORDER BY length(abc) ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)#x]
   +- Window [rank(length(abc)) windowspecdefinition(length(abc) ASC NULLS FIRST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS RANK() OVER (ORDER BY length(abc) ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)#x], [length(abc) ASC NULLS FIRST]
      +- Project
         +- OneRowRelation
-- !query
SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10
-- !query analysis
org.apache.spark.sql.AnalysisException
{
  "errorClass" : "_LEGACY_ERROR_TEMP_1034",
  "messageParameters" : {
    "clauseName" : "WHERE"
  }
}
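-- Window functions are not allowed in WHERE. An illustrative rewrite (not part
-- of the generated output) computes the window in a subquery and filters it:
--   SELECT * FROM (
--     SELECT *, row_number() OVER (ORDER BY salary) AS rn FROM empsalary
--   ) ranked WHERE rn < 10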
-- !query
SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary) < 10
-- !query analysis
org.apache.spark.sql.catalyst.ExtendedAnalysisException
{
  "errorClass" : "UNSUPPORTED_EXPR_FOR_OPERATOR",
  "sqlState" : "42K0E",
  "messageParameters" : {
    "invalidExprSqls" : "\"row_number() OVER (ORDER BY salary ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)\""
  },
  "queryContext" : [ {
    "objectType" : "",
    "objectName" : "",
    "startIndex" : 25,
    "stopIndex" : 84,
    "fragment" : "INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary) < 10"
  } ]
}
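-- The same restriction applies to join conditions: the window expression in the
-- ON clause is rejected as UNSUPPORTED_EXPR_FOR_OPERATOR.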
-- !query
SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY 1
-- !query analysis
org.apache.spark.sql.catalyst.ExtendedAnalysisException
{
  "errorClass" : "UNSUPPORTED_EXPR_FOR_OPERATOR",
  "sqlState" : "42K0E",
  "messageParameters" : {
    "invalidExprSqls" : "\"RANK() OVER (ORDER BY 1 ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)\""
  },
  "queryContext" : [ {
    "objectType" : "",
    "objectName" : "",
    "startIndex" : 58,
    "stopIndex" : 67,
    "fragment" : "GROUP BY 1"
  } ]
}
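-- Likewise for GROUP BY: window functions are evaluated after aggregation, so
-- they cannot drive the grouping itself.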
-- !query
SELECT * FROM rank() OVER (ORDER BY random())
-- !query analysis
org.apache.spark.sql.catalyst.parser.ParseException
{
  "errorClass" : "PARSE_SYNTAX_ERROR",
  "sqlState" : "42601",
  "messageParameters" : {
    "error" : "'BY'",
    "hint" : ""
  }
}
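-- rank() is a window function, not a table-valued function, so it cannot stand
-- in the FROM clause; parsing already fails at the 'BY' inside the OVER clause.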
-- !query
SELECT * FROM empsalary WHERE (rank() OVER (ORDER BY random())) > 10
-- !query analysis
org.apache.spark.sql.AnalysisException
{
  "errorClass" : "_LEGACY_ERROR_TEMP_1034",
  "messageParameters" : {
    "clauseName" : "WHERE"
  }
}
-- !query
SELECT * FROM empsalary WHERE rank() OVER (ORDER BY random())
-- !query analysis
org.apache.spark.sql.AnalysisException
{
  "errorClass" : "_LEGACY_ERROR_TEMP_1034",
  "messageParameters" : {
    "clauseName" : "WHERE"
  }
}
-- !query
select rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1
-- !query analysis
org.apache.spark.sql.catalyst.parser.ParseException
{
  "errorClass" : "PARSE_SYNTAX_ERROR",
  "sqlState" : "42601",
  "messageParameters" : {
    "error" : "'BY'",
    "hint" : ": extra input 'BY'"
  }
}
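-- The stray comma after 'four' makes 'ORDER' parse as another partitioning
-- expression, so the parser reports the following 'BY' as extra input.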
-- !query
SELECT range(1, 100) OVER () FROM empsalary
-- !query analysis
org.apache.spark.sql.AnalysisException
{
  "errorClass" : "UNRESOLVED_ROUTINE",
  "sqlState" : "42883",
  "messageParameters" : {
    "routineName" : "`range`",
    "searchPath" : "[`system`.`builtin`, `system`.`session`, `spark_catalog`.`default`]"
  },
  "queryContext" : [ {
    "objectType" : "",
    "objectName" : "",
    "startIndex" : 8,
    "stopIndex" : 28,
    "fragment" : "range(1, 100) OVER ()"
  } ]
}
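-- range(1, 100) is a table function (valid in FROM, as in the CTEs above), so
-- resolving it as a window function in the SELECT list fails with
-- UNRESOLVED_ROUTINE.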
-- !query
SELECT ntile(0) OVER (ORDER BY ten), ten, four FROM tenk1
-- !query analysis
org.apache.spark.sql.catalyst.ExtendedAnalysisException
{
  "errorClass" : "DATATYPE_MISMATCH.VALUE_OUT_OF_RANGE",
  "sqlState" : "42K09",
  "messageParameters" : {
    "currentValue" : "0",
    "exprName" : "buckets",
    "sqlExpr" : "\"ntile(0)\"",
    "valueRange" : "(0, 2147483647]"
  },
  "queryContext" : [ {
    "objectType" : "",
    "objectName" : "",
    "startIndex" : 8,
    "stopIndex" : 35,
    "fragment" : "ntile(0) OVER (ORDER BY ten)"
  } ]
}
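-- ntile requires a positive bucket count: the valid range printed above is
-- (0, 2147483647], i.e. 1 through Int.MaxValue.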
-- !query
SELECT nth_value(four, 0) OVER (ORDER BY ten), ten, four FROM tenk1
-- !query analysis
org.apache.spark.sql.catalyst.ExtendedAnalysisException
{
  "errorClass" : "DATATYPE_MISMATCH.VALUE_OUT_OF_RANGE",
  "sqlState" : "42K09",
  "messageParameters" : {
    "currentValue" : "0L",
    "exprName" : "offset",
    "sqlExpr" : "\"nth_value(four, 0)\"",
    "valueRange" : "(0, 9223372036854775807]"
  },
  "queryContext" : [ {
    "objectType" : "",
    "objectName" : "",
    "startIndex" : 8,
    "stopIndex" : 45,
    "fragment" : "nth_value(four, 0) OVER (ORDER BY ten)"
  } ]
}
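-- Similarly, nth_value requires a positive offset: (0, 9223372036854775807],
-- i.e. 1 through Long.MaxValue.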
-- !query
DROP TABLE empsalary
-- !query analysis
DropTable false, false
+- ResolvedIdentifier V2SessionCatalog(spark_catalog), default.empsalary
-- !query
DROP TABLE datetimes
-- !query analysis
DropTable false, false
+- ResolvedIdentifier V2SessionCatalog(spark_catalog), default.datetimes
-- !query
DROP TABLE t1
-- !query analysis
DropTable false, false
+- ResolvedIdentifier V2SessionCatalog(spark_catalog), default.t1