fix test
dtenedor committed Jun 20, 2024
commit f5e5f64ea89e642c6805a21278f7b30ff3bf59d8
@@ -40,7 +40,7 @@ import org.apache.spark.util.Utils
""",
examples = """
Examples:
- > SELECT IS_NULL(_FUNC_(result, '{"type": "record", "name": "struct", "fields": [{ "name": "u", "type": ["int","string"] }]}')) AS result FROM (SELECT NAMED_STRUCT('u', NAMED_STRUCT('member0', member0, 'member1', member1)) AS s FROM VALUES (1, NULL), (NULL, 'a') tab(member0, member1));
+ > SELECT _FUNC_(s, '{"type": "record", "name": "struct", "fields": [{ "name": "u", "type": ["int","string"] }]}', map()) IS NULL AS result FROM (SELECT NAMED_STRUCT('u', NAMED_STRUCT('member0', member0, 'member1', member1)) AS s FROM VALUES (1, NULL), (NULL, 'a') tab(member0, member1));
[false]
""",
note = """
@@ -131,7 +131,7 @@ case class FromAvro(child: Expression, jsonFormatSchema: Expression, options: Ex
""",
examples = """
Examples:
- > SELECT IS_NULL(_FUNC_(result, '{"type": "record", "name": "struct", "fields": [{ "name": "u", "type": ["int","string"] }]}', MAP() ).u FROM (SELECT FROM_AVRO(result, '{"type": "record", "name": "struct", "fields": [{ "name": "u", "type": ["int","string"] }]}' ) AS result FROM (SELECT NAMED_STRUCT('u', NAMED_STRUCT('member0', member0, 'member1', member1)) AS s FROM VALUES (1, NULL), (NULL, 'a') tab(member0, member1)));
+ > SELECT _FUNC_(s, '{"type": "record", "name": "struct", "fields": [{ "name": "u", "type": ["int","string"] }]}', MAP()).u IS NULL FROM (SELECT FROM_AVRO(result, '{"type": "record", "name": "struct", "fields": [{ "name": "u", "type": ["int","string"] }]}' ) AS s FROM (SELECT NAMED_STRUCT('u', NAMED_STRUCT('member0', member0, 'member1', member1)) AS s FROM VALUES (1, NULL), (NULL, 'a') tab(member0, member1));
[false]
""",
group = "misc_funcs",
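For reference, here is a minimal sketch (not part of this change) of the Avro round trip that the corrected examples above exercise, written against the existing Scala org.apache.spark.sql.avro.functions API. It assumes a SparkSession named spark with the external spark-avro module on the classpath, and uses a simpler record schema than the union schema in the examples:

// Sketch only: encode a struct column to Avro binary, decode it back, and apply the IS NULL check.
import org.apache.spark.sql.avro.functions.{from_avro, to_avro}
import org.apache.spark.sql.functions.struct
import spark.implicits._

val avroSchema = """{"type": "record", "name": "r", "fields": [{"name": "id", "type": "long"}]}"""
val df = spark.range(3).select(struct($"id").as("s"))                 // struct column to encode
val encoded = df.select(to_avro($"s", avroSchema).as("avro"))         // struct -> Avro binary
val decoded = encoded.select(from_avro($"avro", avroSchema).as("s"))  // Avro binary -> struct
decoded.selectExpr("s.id IS NULL AS result").show()                   // mirrors the IS NULL checks above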
@@ -147,7 +147,6 @@
| org.apache.spark.sql.catalyst.expressions.FormatNumber | format_number | SELECT format_number(12332.123456, 4) | struct<format_number(12332.123456, 4):string> |
| org.apache.spark.sql.catalyst.expressions.FormatString | format_string | SELECT format_string("Hello World %d %s", 100, "days") | struct<format_string(Hello World %d %s, 100, days):string> |
| org.apache.spark.sql.catalyst.expressions.FormatString | printf | SELECT printf("Hello World %d %s", 100, "days") | struct<printf(Hello World %d %s, 100, days):string> |
- | org.apache.spark.sql.catalyst.expressions.FromAvroSqlFunction | from_avro | N/A | N/A |
| org.apache.spark.sql.catalyst.expressions.FromUTCTimestamp | from_utc_timestamp | SELECT from_utc_timestamp('2016-08-31', 'Asia/Seoul') | struct<from_utc_timestamp(2016-08-31, Asia/Seoul):timestamp> |
| org.apache.spark.sql.catalyst.expressions.FromUnixTime | from_unixtime | SELECT from_unixtime(0, 'yyyy-MM-dd HH:mm:ss') | struct<from_unixtime(0, yyyy-MM-dd HH:mm:ss):string> |
| org.apache.spark.sql.catalyst.expressions.Get | get | SELECT get(array(1, 2, 3), 0) | struct<get(array(1, 2, 3), 0):int> |
@@ -335,7 +334,6 @@
| org.apache.spark.sql.catalyst.expressions.Tan | tan | SELECT tan(0) | struct<TAN(0):double> |
| org.apache.spark.sql.catalyst.expressions.Tanh | tanh | SELECT tanh(0) | struct<TANH(0):double> |
| org.apache.spark.sql.catalyst.expressions.TimeWindow | window | SELECT a, window.start, window.end, count(*) as cnt FROM VALUES ('A1', '2021-01-01 00:00:00'), ('A1', '2021-01-01 00:04:30'), ('A1', '2021-01-01 00:06:00'), ('A2', '2021-01-01 00:01:00') AS tab(a, b) GROUP by a, window(b, '5 minutes') ORDER BY a, start | struct<a:string,start:timestamp,end:timestamp,cnt:bigint> |
- | org.apache.spark.sql.catalyst.expressions.ToAvroSqlFunction | to_avro | N/A | N/A |
| org.apache.spark.sql.catalyst.expressions.ToBinary | to_binary | SELECT to_binary('abc', 'utf-8') | struct<to_binary(abc, utf-8):binary> |
| org.apache.spark.sql.catalyst.expressions.ToCharacterBuilder | to_char | SELECT to_char(454, '999') | struct<to_char(454, 999):string> |
| org.apache.spark.sql.catalyst.expressions.ToCharacterBuilder | to_varchar | SELECT to_varchar(454, '999') | struct<to_char(454, 999):string> |
@@ -117,6 +117,10 @@ class ExpressionsSchemaSuite extends QueryTest with SharedSparkSession {
// Note: We need to filter out the commands that set the parameters, such as:
// SET spark.sql.parser.escapedStringLiterals=true
example.split(" > ").tail.filterNot(_.trim.startsWith("SET")).take(1).foreach {
+ case _ if funcName == "from_avro" || funcName == "to_avro" =>
+   // Skip running the example queries for the from_avro and to_avro functions because
+   // these functions dynamically load the AvroDataToCatalyst or CatalystDataToAvro classes
+   // which are not available in this test.
case exampleRe(sql, _) =>
val df = spark.sql(sql)
val escapedSql = sql.replaceAll("\\|", "&#124;")
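Roughly, the skip added above works like the following sketch (names such as runnableExamples are assumed for illustration and are not the suite's actual code): example queries are split out of the documentation text, SET commands are dropped, and functions whose examples cannot run on the test classpath return nothing.

// Illustrative only; `funcName` and `exampleText` stand in for the suite's real inputs.
val skippedFunctions = Set("from_avro", "to_avro") // rely on Avro classes absent from the test classpath

def runnableExamples(funcName: String, exampleText: String): Seq[String] = {
  if (skippedFunctions.contains(funcName)) {
    Seq.empty // skip: AvroDataToCatalyst / CatalystDataToAvro cannot be loaded here
  } else {
    exampleText.split(" > ").tail            // each chunk begins with one example query
      .filterNot(_.trim.startsWith("SET"))   // drop parameter-setting commands
      .take(1)                               // only the first example is executed
      .toSeq
  }
}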
@@ -225,6 +225,9 @@ class ExpressionInfoSuite extends SparkFunSuite with SharedSparkSession {
// Throws an error
"org.apache.spark.sql.catalyst.expressions.RaiseErrorExpressionBuilder",
"org.apache.spark.sql.catalyst.expressions.AssertTrue",
+ // Requires dynamic class loading not available in this test suite.
+ "org.apache.spark.sql.catalyst.expressions.FromAvro",
+ "org.apache.spark.sql.catalyst.expressions.ToAvro",
classOf[CurrentUser].getName,
// The encrypt expression includes a random initialization vector to its encrypted result
classOf[AesEncrypt].getName)
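The ExpressionInfoSuite exclusion follows the same pattern; a rough sketch of the idea (assumed names, not the suite's exact structure):

// Expressions whose documentation examples are not executed by the suite.
val ignoredExpressions = Set(
  "org.apache.spark.sql.catalyst.expressions.FromAvro", // needs dynamically loaded Avro classes
  "org.apache.spark.sql.catalyst.expressions.ToAvro"
)

def shouldRunExamples(exprClassName: String): Boolean =
  !ignoredExpressions.contains(exprClassName)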