Edit on GitHub

sqlglot.dialects.snowflake

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, jsonpath, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    build_timetostr_or_tochar,
  10    binary_from_function,
  11    build_default_decimal_type,
  12    build_timestamp_from_parts,
  13    date_delta_sql,
  14    date_trunc_to_time,
  15    datestrtodate_sql,
  16    build_formatted_time,
  17    if_sql,
  18    inline_array_sql,
  19    max_or_greatest,
  20    min_or_least,
  21    rename_func,
  22    timestamptrunc_sql,
  23    timestrtotime_sql,
  24    var_map_sql,
  25    map_date_part,
  26    no_timestamp_sql,
  27    strposition_sql,
  28    timestampdiff_sql,
  29    no_make_interval_sql,
  30    groupconcat_sql,
  31)
  32from sqlglot.generator import unsupported_args
  33from sqlglot.helper import flatten, is_float, is_int, seq_get
  34from sqlglot.tokens import TokenType
  35
  36if t.TYPE_CHECKING:
  37    from sqlglot._typing import E, B
  38
  39
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a parser for Snowflake's TO_DATE / TO_TIME / TO_TIMESTAMP family.

    Args:
        name: the original Snowflake function name, used for the `exp.Anonymous`
            fallback when the call can't be mapped to a transpilable node.
        kind: the target data type the function converts to.
        safe: True for the TRY_* variants, which return NULL instead of raising.
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        scale_or_fmt = seq_get(args, 1)

        # Distinguish `TO_TIMESTAMP(<int>)` / `TO_TIMESTAMP(num, <int scale>)`
        # from the string-format variants
        int_value = value is not None and is_int(value.name)
        int_scale_or_fmt = scale_or_fmt is not None and scale_or_fmt.is_int

        if isinstance(value, exp.Literal) or (value and scale_or_fmt):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return (
                    exp.TryCast(this=value, to=exp.DataType.build(kind))
                    if safe
                    else exp.cast(value, kind)
                )

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if not safe and (int_value or int_scale_or_fmt):
                    # TRY_TO_TIMESTAMP('integer') is not parsed into exp.UnixToTime as
                    # it's not easily transpilable
                    return exp.UnixToTime(this=value, scale=scale_or_fmt)
                if not int_scale_or_fmt and not is_float(value.name):
                    expr = build_formatted_time(exp.StrToTime, "snowflake")(args)
                    expr.set("safe", safe)
                    return expr

        if kind in (exp.DataType.Type.DATE, exp.DataType.Type.TIME) and not int_value:
            klass = exp.TsOrDsToDate if kind == exp.DataType.Type.DATE else exp.TsOrDsToTime
            formatted_exp = build_formatted_time(klass, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # Anything we can't confidently map is preserved as an anonymous call
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  81
  82
  83def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  84    expression = parser.build_var_map(args)
  85
  86    if isinstance(expression, exp.StarMap):
  87        return expression
  88
  89    return exp.Struct(
  90        expressions=[
  91            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  92        ]
  93    )
  94
  95
  96def _build_datediff(args: t.List) -> exp.DateDiff:
  97    return exp.DateDiff(
  98        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  99    )
 100
 101
 102def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
 103    def _builder(args: t.List) -> E:
 104        return expr_type(
 105            this=seq_get(args, 2),
 106            expression=seq_get(args, 1),
 107            unit=map_date_part(seq_get(args, 0)),
 108        )
 109
 110    return _builder
 111
 112
 113def _build_bitwise(expr_type: t.Type[B], name: str) -> t.Callable[[t.List], B | exp.Anonymous]:
 114    def _builder(args: t.List) -> B | exp.Anonymous:
 115        if len(args) == 3:
 116            return exp.Anonymous(this=name, expressions=args)
 117
 118        return binary_from_function(expr_type)(args)
 119
 120    return _builder
 121
 122
 123# https://docs.snowflake.com/en/sql-reference/functions/div0
 124def _build_if_from_div0(args: t.List) -> exp.If:
 125    lhs = exp._wrap(seq_get(args, 0), exp.Binary)
 126    rhs = exp._wrap(seq_get(args, 1), exp.Binary)
 127
 128    cond = exp.EQ(this=rhs, expression=exp.Literal.number(0)).and_(
 129        exp.Is(this=lhs, expression=exp.null()).not_()
 130    )
 131    true = exp.Literal.number(0)
 132    false = exp.Div(this=lhs, expression=rhs)
 133    return exp.If(this=cond, true=true, false=false)
 134
 135
 136# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 137def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 138    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 139    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 140
 141
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    """Rewrite NULLIFZERO(x) as IF(x = 0, NULL, x)."""
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 146
 147
 148def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 149    flag = expression.text("flag")
 150
 151    if "i" not in flag:
 152        flag += "i"
 153
 154    return self.func(
 155        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 156    )
 157
 158
 159def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 160    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 161
 162    if not regexp_replace.args.get("replacement"):
 163        regexp_replace.set("replacement", exp.Literal.string(""))
 164
 165    return regexp_replace
 166
 167
 168def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
 169    def _parse(self: Snowflake.Parser) -> exp.Show:
 170        return self._parse_show_snowflake(*args, **kwargs)
 171
 172    return _parse
 173
 174
 175def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 176    trunc = date_trunc_to_time(args)
 177    trunc.set("unit", map_date_part(trunc.args["unit"]))
 178    return trunc
 179
 180
def _unqualify_pivot_columns(expression: exp.Expression) -> exp.Expression:
    """
    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
    so we need to unqualify them. Same goes for ANY ORDER BY <column>.

    Example:
        >>> from sqlglot import parse_one
        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
        >>> print(_unqualify_pivot_columns(expr).sql(dialect="snowflake"))
        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
    """
    if isinstance(expression, exp.Pivot):
        if expression.unpivot:
            expression = transforms.unqualify_columns(expression)
        else:
            # PIVOT: unqualify columns nested inside a PivotAny field, mutating
            # the field's expression list in place
            for field in expression.fields:
                field_expr = seq_get(field.expressions if field else [], 0)

                if isinstance(field_expr, exp.PivotAny):
                    unqualified_field_expr = transforms.unqualify_columns(field_expr)
                    t.cast(exp.Expression, field).set("expressions", unqualified_field_expr, 0)

    return expression
 204
 205
 206def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
 207    assert isinstance(expression, exp.Create)
 208
 209    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
 210        if expression.this in exp.DataType.NESTED_TYPES:
 211            expression.set("expressions", None)
 212        return expression
 213
 214    props = expression.args.get("properties")
 215    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
 216        for schema_expression in expression.this.expressions:
 217            if isinstance(schema_expression, exp.ColumnDef):
 218                column_type = schema_expression.kind
 219                if isinstance(column_type, exp.DataType):
 220                    column_type.transform(_flatten_structured_type, copy=False)
 221
 222    return expression
 223
 224
def _unnest_generate_date_array(unnest: exp.Unnest) -> None:
    """Rewrite UNNEST(GENERATE_DATE_ARRAY(...)) into a Snowflake-compatible
    subquery that projects DATEADD over an ARRAY_GENERATE_RANGE sequence.

    Mutates `unnest` in place; it's a no-op unless the call has a start, an end
    and an `INTERVAL '1' <unit>` step.
    """
    generate_date_array = unnest.expressions[0]
    start = generate_date_array.args.get("start")
    end = generate_date_array.args.get("end")
    step = generate_date_array.args.get("step")

    # Only the common `INTERVAL '1' <unit>` step shape is handled
    if not start or not end or not isinstance(step, exp.Interval) or step.name != "1":
        return

    unit = step.args.get("unit")

    unnest_alias = unnest.args.get("alias")
    if unnest_alias:
        # Copy so the alias isn't shared between the inner UNNEST and the
        # wrapping subquery built below
        unnest_alias = unnest_alias.copy()
        sequence_value_name = seq_get(unnest_alias.columns, 0) or "value"
    else:
        sequence_value_name = "value"

    # We'll add the next sequence value to the starting date and project the result
    date_add = _build_date_time_add(exp.DateAdd)(
        [unit, exp.cast(sequence_value_name, "int"), exp.cast(start, "date")]
    ).as_(sequence_value_name)

    # We use DATEDIFF to compute the number of sequence values needed
    number_sequence = Snowflake.Parser.FUNCTIONS["ARRAY_GENERATE_RANGE"](
        [exp.Literal.number(0), _build_datediff([unit, start, end]) + 1]
    )

    unnest.set("expressions", [number_sequence])
    unnest.replace(exp.select(date_add).from_(unnest.copy()).subquery(unnest_alias))
 255
 256
def _transform_generate_date_array(expression: exp.Expression) -> exp.Expression:
    """Rewrite every GENERATE_DATE_ARRAY in a SELECT into Snowflake-executable SQL.

    Scalar (non-UNNEST) usages are first wrapped in an ARRAY_AGG subquery; the
    UNNEST-in-FROM/JOIN case is then unnested via `_unnest_generate_date_array`.
    """
    if isinstance(expression, exp.Select):
        for generate_date_array in expression.find_all(exp.GenerateDateArray):
            parent = generate_date_array.parent

            # If GENERATE_DATE_ARRAY is used directly as an array (e.g passed into ARRAY_LENGTH), the transformed Snowflake
            # query is the following (it'll be unnested properly on the next iteration due to copy):
            # SELECT ref(GENERATE_DATE_ARRAY(...)) -> SELECT ref((SELECT ARRAY_AGG(*) FROM UNNEST(GENERATE_DATE_ARRAY(...))))
            if not isinstance(parent, exp.Unnest):
                unnest = exp.Unnest(expressions=[generate_date_array.copy()])
                generate_date_array.replace(
                    exp.select(exp.ArrayAgg(this=exp.Star())).from_(unnest).subquery()
                )

            # Only single-expression UNNESTs appearing in FROM/JOIN can be
            # rewritten into the sequence subquery
            if (
                isinstance(parent, exp.Unnest)
                and isinstance(parent.parent, (exp.From, exp.Join))
                and len(parent.expressions) == 1
            ):
                _unnest_generate_date_array(parent)

    return expression
 279
 280
 281def _build_regexp_extract(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
 282    def _builder(args: t.List) -> E:
 283        return expr_type(
 284            this=seq_get(args, 0),
 285            expression=seq_get(args, 1),
 286            position=seq_get(args, 2),
 287            occurrence=seq_get(args, 3),
 288            parameters=seq_get(args, 4),
 289            group=seq_get(args, 5) or exp.Literal.number(0),
 290        )
 291
 292    return _builder
 293
 294
def _regexpextract_sql(
    self: Snowflake.Generator, expression: exp.RegexpExtract | exp.RegexpExtractAll
) -> str:
    """Generate REGEXP_SUBSTR / REGEXP_EXTRACT_ALL, materializing defaults for
    any optional argument that precedes an explicitly-set one."""
    # Other dialects don't support all of the following parameters, so we need to
    # generate default values as necessary to ensure the transpilation is correct
    group = expression.args.get("group")

    # To avoid generating all these default values, we set group to None if
    # it's 0 (also default value) which doesn't trigger the following chain
    if group and group.name == "0":
        group = None

    # Each default is only emitted when a later positional argument is present,
    # so trailing defaults are omitted entirely
    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR" if isinstance(expression, exp.RegexpExtract) else "REGEXP_EXTRACT_ALL",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
 318
 319
 320def _json_extract_value_array_sql(
 321    self: Snowflake.Generator, expression: exp.JSONValueArray | exp.JSONExtractArray
 322) -> str:
 323    json_extract = exp.JSONExtract(this=expression.this, expression=expression.expression)
 324    ident = exp.to_identifier("x")
 325
 326    if isinstance(expression, exp.JSONValueArray):
 327        this: exp.Expression = exp.cast(ident, to=exp.DataType.Type.VARCHAR)
 328    else:
 329        this = exp.ParseJSON(this=f"TO_JSON({ident})")
 330
 331    transform_lambda = exp.Lambda(expressions=[ident], this=this)
 332
 333    return self.func("TRANSFORM", json_extract, transform_lambda)
 334
 335
 336class Snowflake(Dialect):
 337    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
 338    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
 339    NULL_ORDERING = "nulls_are_large"
 340    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
 341    SUPPORTS_USER_DEFINED_TYPES = False
 342    SUPPORTS_SEMI_ANTI_JOIN = False
 343    PREFER_CTE_ALIAS_COLUMN = True
 344    TABLESAMPLE_SIZE_IS_PERCENT = True
 345    COPY_PARAMS_ARE_CSV = False
 346    ARRAY_AGG_INCLUDES_NULLS = None
 347
 348    TIME_MAPPING = {
 349        "YYYY": "%Y",
 350        "yyyy": "%Y",
 351        "YY": "%y",
 352        "yy": "%y",
 353        "MMMM": "%B",
 354        "mmmm": "%B",
 355        "MON": "%b",
 356        "mon": "%b",
 357        "MM": "%m",
 358        "mm": "%m",
 359        "DD": "%d",
 360        "dd": "%-d",
 361        "DY": "%a",
 362        "dy": "%w",
 363        "HH24": "%H",
 364        "hh24": "%H",
 365        "HH12": "%I",
 366        "hh12": "%I",
 367        "MI": "%M",
 368        "mi": "%M",
 369        "SS": "%S",
 370        "ss": "%S",
 371        "FF6": "%f",
 372        "ff6": "%f",
 373    }
 374
 375    DATE_PART_MAPPING = {
 376        **Dialect.DATE_PART_MAPPING,
 377        "ISOWEEK": "WEEKISO",
 378    }
 379
 380    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 381        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 382        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 383        if (
 384            isinstance(expression, exp.Identifier)
 385            and isinstance(expression.parent, exp.Table)
 386            and expression.name.lower() == "dual"
 387        ):
 388            return expression  # type: ignore
 389
 390        return super().quote_identifier(expression, identify=identify)
 391
 392    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
 393        SINGLE_TOKENS = jsonpath.JSONPathTokenizer.SINGLE_TOKENS.copy()
 394        SINGLE_TOKENS.pop("$")
 395
 396    class Parser(parser.Parser):
 397        IDENTIFY_PIVOT_STRINGS = True
 398        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
 399        COLON_IS_VARIANT_EXTRACT = True
 400
 401        ID_VAR_TOKENS = {
 402            *parser.Parser.ID_VAR_TOKENS,
 403            TokenType.MATCH_CONDITION,
 404        }
 405
 406        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
 407        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
 408
 409        COLON_PLACEHOLDER_TOKENS = ID_VAR_TOKENS | {TokenType.NUMBER}
 410
 411        FUNCTIONS = {
 412            **parser.Parser.FUNCTIONS,
 413            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
 414            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
 415            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
 416                this=seq_get(args, 1), expression=seq_get(args, 0)
 417            ),
 418            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
 419                # ARRAY_GENERATE_RANGE has an exlusive end; we normalize it to be inclusive
 420                start=seq_get(args, 0),
 421                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
 422                step=seq_get(args, 2),
 423            ),
 424            "BITXOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
 425            "BIT_XOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
 426            "BITOR": _build_bitwise(exp.BitwiseOr, "BITOR"),
 427            "BIT_OR": _build_bitwise(exp.BitwiseOr, "BITOR"),
 428            "BITSHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BITSHIFTLEFT"),
 429            "BIT_SHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BIT_SHIFTLEFT"),
 430            "BITSHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BITSHIFTRIGHT"),
 431            "BIT_SHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BIT_SHIFTRIGHT"),
 432            "BOOLXOR": _build_bitwise(exp.Xor, "BOOLXOR"),
 433            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
 434            "DATE_TRUNC": _date_trunc_to_time,
 435            "DATEADD": _build_date_time_add(exp.DateAdd),
 436            "DATEDIFF": _build_datediff,
 437            "DIV0": _build_if_from_div0,
 438            "EDITDISTANCE": lambda args: exp.Levenshtein(
 439                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
 440            ),
 441            "FLATTEN": exp.Explode.from_arg_list,
 442            "GET_PATH": lambda args, dialect: exp.JSONExtract(
 443                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
 444            ),
 445            "HEX_DECODE_BINARY": exp.Unhex.from_arg_list,
 446            "IFF": exp.If.from_arg_list,
 447            "LAST_DAY": lambda args: exp.LastDay(
 448                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
 449            ),
 450            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
 451            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
 452            "NULLIFZERO": _build_if_from_nullifzero,
 453            "OBJECT_CONSTRUCT": _build_object_construct,
 454            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
 455            "REGEXP_REPLACE": _build_regexp_replace,
 456            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
 457            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
 458            "RLIKE": exp.RegexpLike.from_arg_list,
 459            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
 460            "TABLE": lambda args: exp.TableFromRows(this=seq_get(args, 0)),
 461            "TIMEADD": _build_date_time_add(exp.TimeAdd),
 462            "TIMEDIFF": _build_datediff,
 463            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
 464            "TIMESTAMPDIFF": _build_datediff,
 465            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
 466            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
 467            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
 468            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
 469            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
 470            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
 471            "TRY_TO_TIME": _build_datetime("TRY_TO_TIME", exp.DataType.Type.TIME, safe=True),
 472            "TRY_TO_TIMESTAMP": _build_datetime(
 473                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
 474            ),
 475            "TO_CHAR": build_timetostr_or_tochar,
 476            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
 477            "TO_NUMBER": lambda args: exp.ToNumber(
 478                this=seq_get(args, 0),
 479                format=seq_get(args, 1),
 480                precision=seq_get(args, 2),
 481                scale=seq_get(args, 3),
 482            ),
 483            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
 484            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
 485            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
 486            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
 487            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
 488            "TO_VARCHAR": exp.ToChar.from_arg_list,
 489            "ZEROIFNULL": _build_if_from_zeroifnull,
 490        }
 491
 492        FUNCTION_PARSERS = {
 493            **parser.Parser.FUNCTION_PARSERS,
 494            "DATE_PART": lambda self: self._parse_date_part(),
 495            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
 496            "LISTAGG": lambda self: self._parse_string_agg(),
 497        }
 498        FUNCTION_PARSERS.pop("TRIM")
 499
 500        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
 501
 502        RANGE_PARSERS = {
 503            **parser.Parser.RANGE_PARSERS,
 504            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
 505            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
 506        }
 507
 508        ALTER_PARSERS = {
 509            **parser.Parser.ALTER_PARSERS,
 510            "UNSET": lambda self: self.expression(
 511                exp.Set,
 512                tag=self._match_text_seq("TAG"),
 513                expressions=self._parse_csv(self._parse_id_var),
 514                unset=True,
 515            ),
 516        }
 517
 518        STATEMENT_PARSERS = {
 519            **parser.Parser.STATEMENT_PARSERS,
 520            TokenType.GET: lambda self: self._parse_get(),
 521            TokenType.PUT: lambda self: self._parse_put(),
 522            TokenType.SHOW: lambda self: self._parse_show(),
 523        }
 524
 525        PROPERTY_PARSERS = {
 526            **parser.Parser.PROPERTY_PARSERS,
 527            "CREDENTIALS": lambda self: self._parse_credentials_property(),
 528            "FILE_FORMAT": lambda self: self._parse_file_format_property(),
 529            "LOCATION": lambda self: self._parse_location_property(),
 530            "TAG": lambda self: self._parse_tag(),
 531            "USING": lambda self: self._match_text_seq("TEMPLATE")
 532            and self.expression(exp.UsingTemplateProperty, this=self._parse_statement()),
 533        }
 534
 535        TYPE_CONVERTERS = {
 536            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
 537            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
 538        }
 539
 540        SHOW_PARSERS = {
 541            "DATABASES": _show_parser("DATABASES"),
 542            "TERSE DATABASES": _show_parser("DATABASES"),
 543            "SCHEMAS": _show_parser("SCHEMAS"),
 544            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
 545            "OBJECTS": _show_parser("OBJECTS"),
 546            "TERSE OBJECTS": _show_parser("OBJECTS"),
 547            "TABLES": _show_parser("TABLES"),
 548            "TERSE TABLES": _show_parser("TABLES"),
 549            "VIEWS": _show_parser("VIEWS"),
 550            "TERSE VIEWS": _show_parser("VIEWS"),
 551            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
 552            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
 553            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
 554            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
 555            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
 556            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
 557            "SEQUENCES": _show_parser("SEQUENCES"),
 558            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
 559            "STAGES": _show_parser("STAGES"),
 560            "COLUMNS": _show_parser("COLUMNS"),
 561            "USERS": _show_parser("USERS"),
 562            "TERSE USERS": _show_parser("USERS"),
 563            "FILE FORMATS": _show_parser("FILE FORMATS"),
 564            "FUNCTIONS": _show_parser("FUNCTIONS"),
 565            "PROCEDURES": _show_parser("PROCEDURES"),
 566            "WAREHOUSES": _show_parser("WAREHOUSES"),
 567        }
 568
 569        CONSTRAINT_PARSERS = {
 570            **parser.Parser.CONSTRAINT_PARSERS,
 571            "WITH": lambda self: self._parse_with_constraint(),
 572            "MASKING": lambda self: self._parse_with_constraint(),
 573            "PROJECTION": lambda self: self._parse_with_constraint(),
 574            "TAG": lambda self: self._parse_with_constraint(),
 575        }
 576
 577        STAGED_FILE_SINGLE_TOKENS = {
 578            TokenType.DOT,
 579            TokenType.MOD,
 580            TokenType.SLASH,
 581        }
 582
 583        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
 584
 585        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
 586
 587        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}
 588
 589        LAMBDAS = {
 590            **parser.Parser.LAMBDAS,
 591            TokenType.ARROW: lambda self, expressions: self.expression(
 592                exp.Lambda,
 593                this=self._replace_lambda(
 594                    self._parse_assignment(),
 595                    expressions,
 596                ),
 597                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
 598            ),
 599        }
 600
 601        def _parse_use(self) -> exp.Use:
 602            if self._match_text_seq("SECONDARY", "ROLES"):
 603                this = self._match_texts(("ALL", "NONE")) and exp.var(self._prev.text.upper())
 604                roles = None if this else self._parse_csv(lambda: self._parse_table(schema=False))
 605                return self.expression(
 606                    exp.Use, kind="SECONDARY ROLES", this=this, expressions=roles
 607                )
 608
 609            return super()._parse_use()
 610
 611        def _negate_range(
 612            self, this: t.Optional[exp.Expression] = None
 613        ) -> t.Optional[exp.Expression]:
 614            if not this:
 615                return this
 616
 617            query = this.args.get("query")
 618            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 619                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 620                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 621                # which can produce different results (most likely a SnowFlake bug).
 622                #
 623                # https://docs.snowflake.com/en/sql-reference/functions/in
 624                # Context: https://github.com/tobymao/sqlglot/issues/3890
 625                return self.expression(
 626                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 627                )
 628
 629            return self.expression(exp.Not, this=this)
 630
 631        def _parse_tag(self) -> exp.Tags:
 632            return self.expression(
 633                exp.Tags,
 634                expressions=self._parse_wrapped_csv(self._parse_property),
 635            )
 636
 637        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
 638            if self._prev.token_type != TokenType.WITH:
 639                self._retreat(self._index - 1)
 640
 641            if self._match_text_seq("MASKING", "POLICY"):
 642                policy = self._parse_column()
 643                return self.expression(
 644                    exp.MaskingPolicyColumnConstraint,
 645                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
 646                    expressions=self._match(TokenType.USING)
 647                    and self._parse_wrapped_csv(self._parse_id_var),
 648                )
 649            if self._match_text_seq("PROJECTION", "POLICY"):
 650                policy = self._parse_column()
 651                return self.expression(
 652                    exp.ProjectionPolicyColumnConstraint,
 653                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
 654                )
 655            if self._match(TokenType.TAG):
 656                return self._parse_tag()
 657
 658            return None
 659
 660        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
 661            if self._match(TokenType.TAG):
 662                return self._parse_tag()
 663
 664            return super()._parse_with_property()
 665
 666        def _parse_create(self) -> exp.Create | exp.Command:
 667            expression = super()._parse_create()
 668            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 669                # Replace the Table node with the enclosed Identifier
 670                expression.this.replace(expression.this.this)
 671
 672            return expression
 673
 674        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 675        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 676        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 677            this = self._parse_var() or self._parse_type()
 678
 679            if not this:
 680                return None
 681
 682            self._match(TokenType.COMMA)
 683            expression = self._parse_bitwise()
 684            this = map_date_part(this)
 685            name = this.name.upper()
 686
 687            if name.startswith("EPOCH"):
 688                if name == "EPOCH_MILLISECOND":
 689                    scale = 10**3
 690                elif name == "EPOCH_MICROSECOND":
 691                    scale = 10**6
 692                elif name == "EPOCH_NANOSECOND":
 693                    scale = 10**9
 694                else:
 695                    scale = None
 696
 697                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 698                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 699
 700                if scale:
 701                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 702
 703                return to_unix
 704
 705            return self.expression(exp.Extract, this=this, expression=expression)
 706
 707        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 708            if is_map:
 709                # Keys are strings in Snowflake's objects, see also:
 710                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 711                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 712                return self._parse_slice(self._parse_string())
 713
 714            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 715
 716        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 717            lateral = super()._parse_lateral()
 718            if not lateral:
 719                return lateral
 720
 721            if isinstance(lateral.this, exp.Explode):
 722                table_alias = lateral.args.get("alias")
 723                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 724                if table_alias and not table_alias.args.get("columns"):
 725                    table_alias.set("columns", columns)
 726                elif not table_alias:
 727                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 728
 729            return lateral
 730
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse table parts, including Snowflake staged-file references.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            # A string literal or an @-prefixed path refers to a staged file.
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Staged files may carry wrapped options: (FILE_FORMAT => ..., PATTERN => ...)
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a string or a (possibly qualified) format name.
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 764
 765        def _parse_table(
 766            self,
 767            schema: bool = False,
 768            joins: bool = False,
 769            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
 770            parse_bracket: bool = False,
 771            is_db_reference: bool = False,
 772            parse_partition: bool = False,
 773        ) -> t.Optional[exp.Expression]:
 774            table = super()._parse_table(
 775                schema=schema,
 776                joins=joins,
 777                alias_tokens=alias_tokens,
 778                parse_bracket=parse_bracket,
 779                is_db_reference=is_db_reference,
 780                parse_partition=parse_partition,
 781            )
 782            if isinstance(table, exp.Table) and isinstance(table.this, exp.TableFromRows):
 783                table_from_rows = table.this
 784                for arg in exp.TableFromRows.arg_types:
 785                    if arg != "this":
 786                        table_from_rows.set(arg, table.args.get(arg))
 787
 788                table = table_from_rows
 789
 790            return table
 791
 792        def _parse_id_var(
 793            self,
 794            any_token: bool = True,
 795            tokens: t.Optional[t.Collection[TokenType]] = None,
 796        ) -> t.Optional[exp.Expression]:
 797            if self._match_text_seq("IDENTIFIER", "("):
 798                identifier = (
 799                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 800                    or self._parse_string()
 801                )
 802                self._match_r_paren()
 803                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 804
 805            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 806
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a Snowflake SHOW command into an exp.Show node.

            `this` is the object kind being shown (e.g. "SCHEMAS", "PRIMARY KEYS").
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_text_seq("CLASS"):
                    scope_kind = "CLASS"
                    scope = self._parse_table_parts()
                elif self._match_text_seq("APPLICATION"):
                    scope_kind = "APPLICATION"
                    if self._match_text_seq("PACKAGE"):
                        scope_kind += " PACKAGE"
                    scope = self._parse_table_parts()
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare name: infer whether the scope is a schema or a table.
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            # The remaining optional clauses must be consumed in this order.
            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                    "privileges": self._match_text_seq("WITH", "PRIVILEGES")
                    and self._parse_csv(lambda: self._parse_var(any_token=True, upper=True)),
                },
            )
 854
 855        def _parse_put(self) -> exp.Put | exp.Command:
 856            if self._curr.token_type != TokenType.STRING:
 857                return self._parse_as_command(self._prev)
 858
 859            return self.expression(
 860                exp.Put,
 861                this=self._parse_string(),
 862                target=self._parse_location_path(),
 863                properties=self._parse_properties(),
 864            )
 865
 866        def _parse_get(self) -> t.Optional[exp.Expression]:
 867            start = self._prev
 868
 869            # If we detect GET( then we need to parse a function, not a statement
 870            if self._match(TokenType.L_PAREN):
 871                self._retreat(self._index - 2)
 872                return self._parse_expression()
 873
 874            target = self._parse_location_path()
 875
 876            # Parse as command if unquoted file path
 877            if self._curr.token_type == TokenType.URI_START:
 878                return self._parse_as_command(start)
 879
 880            return self.expression(
 881                exp.Get,
 882                this=self._parse_string(),
 883                target=target,
 884                properties=self._parse_properties(),
 885            )
 886
 887        def _parse_location_property(self) -> exp.LocationProperty:
 888            self._match(TokenType.EQ)
 889            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 890
 891        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 892            # Parse either a subquery or a staged file
 893            return (
 894                self._parse_select(table=True, parse_subquery_alias=False)
 895                if self._match(TokenType.L_PAREN, advance=False)
 896                else self._parse_table_parts()
 897            )
 898
        def _parse_location_path(self) -> exp.Var:
            """Greedily consume tokens that form a stage/location path (e.g. @stage/dir/file)."""
            start = self._curr
            self._advance_any(ignore_reserved=True)

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                self._advance_any(ignore_reserved=True)

            # Recover the raw SQL text spanned by the consumed tokens.
            return exp.var(self._find_sql(start, self._prev))
 912
 913        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 914            this = super()._parse_lambda_arg()
 915
 916            if not this:
 917                return this
 918
 919            typ = self._parse_types()
 920
 921            if typ:
 922                return self.expression(exp.Cast, this=this, to=typ)
 923
 924            return this
 925
 926        def _parse_foreign_key(self) -> exp.ForeignKey:
 927            # inlineFK, the REFERENCES columns are implied
 928            if self._match(TokenType.REFERENCES, advance=False):
 929                return self.expression(exp.ForeignKey)
 930
 931            # outoflineFK, explicitly names the columns
 932            return super()._parse_foreign_key()
 933
 934        def _parse_file_format_property(self) -> exp.FileFormatProperty:
 935            self._match(TokenType.EQ)
 936            if self._match(TokenType.L_PAREN, advance=False):
 937                expressions = self._parse_wrapped_options()
 938            else:
 939                expressions = [self._parse_format_name()]
 940
 941            return self.expression(
 942                exp.FileFormatProperty,
 943                expressions=expressions,
 944            )
 945
 946        def _parse_credentials_property(self) -> exp.CredentialsProperty:
 947            return self.expression(
 948                exp.CredentialsProperty,
 949                expressions=self._parse_wrapped_options(),
 950            )
 951
    class Tokenizer(tokens.Tokenizer):
        # Quotes are escaped with a backslash or by doubling the quote.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings.
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        # Snowflake block comments do not nest.
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "FILE://": TokenType.URI_START,
            "BYTEINT": TokenType.INT,
            "EXCLUDE": TokenType.EXCEPT,
            "FILE FORMAT": TokenType.FILE_FORMAT,
            "GET": TokenType.GET,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.PUT,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STAGE": TokenType.STAGE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no "/*+" hint syntax, so don't tokenize it specially.
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed structurally (see Parser._parse_show_snowflake), not as a raw command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 996
    class Generator(generator.Generator):
        # Session variables are referenced as $name.
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells SELECT * EXCEPT (...) as EXCLUDE.
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"
1020
        # AST node -> Snowflake SQL rendering overrides.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (element, array), the reverse of the AST order.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.ArrayIntersect: rename_func("ARRAY_INTERSECTION"),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseOr: rename_func("BITOR"),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.BitwiseLeftShift: rename_func("BITSHIFTLEFT"),
            exp.BitwiseRightShift: rename_func("BITSHIFTRIGHT"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfWeekIso: rename_func("DAYOFWEEKISO"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: lambda self, e: self.func(
                "DATE_PART", map_date_part(e.this, self.dialect), e.expression
            ),
            exp.FileFormatProperty: lambda self,
            e: f"FILE_FORMAT=({self.expressions(e, 'expressions', sep=' ')})",
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's end bound is exclusive, hence the + 1.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: lambda self, e: groupconcat_sql(self, e, sep=""),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
            exp.LocationProperty: lambda self, e: f"LOCATION={self.sql(e, 'this')}",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_window_clause,
                    transforms.eliminate_distinct_on,
                    transforms.explode_projection_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.EndsWith: rename_func("ENDSWITH"),
            exp.StrPosition: lambda self, e: strposition_sql(
                self, e, func_name="CHARINDEX", supports_position=True
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.StPoint: rename_func("ST_MAKEPOINT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            # TIMESTAMPDIFF's operand order is (unit, start, end), i.e. reversed.
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.TsOrDsToTime: lambda self, e: self.func(
                "TRY_TO_TIME" if e.args.get("safe") else "TO_TIME", e.this, self.format_time(e)
            ),
            exp.Unhex: rename_func("HEX_DECODE_BINARY"),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }
1138
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
            exp.DataType.Type.BIGDECIMAL: "DOUBLE",
        }

        TOKEN_MAPPING = {
            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.CredentialsProperty: exp.Properties.Location.POST_WITH,
            exp.LocationProperty: exp.Properties.Location.POST_WITH,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expressions that cannot appear inside a VALUES table (see values_sql).
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS = (exp.ArrayAgg,)
1173
1174        def with_properties(self, properties: exp.Properties) -> str:
1175            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
1176
1177        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
1178            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
1179                values_as_table = False
1180
1181            return super().values_sql(expression, values_as_table=values_as_table)
1182
1183        def datatype_sql(self, expression: exp.DataType) -> str:
1184            expressions = expression.expressions
1185            if (
1186                expressions
1187                and expression.is_type(*exp.DataType.STRUCT_TYPES)
1188                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
1189            ):
1190                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
1191                return "OBJECT"
1192
1193            return super().datatype_sql(expression)
1194
1195        def tonumber_sql(self, expression: exp.ToNumber) -> str:
1196            return self.func(
1197                "TO_NUMBER",
1198                expression.this,
1199                expression.args.get("format"),
1200                expression.args.get("precision"),
1201                expression.args.get("scale"),
1202            )
1203
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding a `milli` argument into `nano`.

            Snowflake's function takes nanoseconds, so a millisecond argument is
            popped from the AST and rescaled (1 ms == 1_000_000 ns).
            """
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1211
1212        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1213            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1214                return self.func("TO_GEOGRAPHY", expression.this)
1215            if expression.is_type(exp.DataType.Type.GEOMETRY):
1216                return self.func("TO_GEOMETRY", expression.this)
1217
1218            return super().cast_sql(expression, safe_prefix=safe_prefix)
1219
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, degrading to CAST for non-string operands.

            TRY_CAST only works for string values in Snowflake, so the operand's
            type is annotated first if it is not yet known.
            """
            value = expression.this

            if value.type is None:
                # Deferred import to avoid a module-level import cycle with the optimizer.
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value, dialect=self.dialect)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
1233
1234        def log_sql(self, expression: exp.Log) -> str:
1235            if not expression.expression:
1236                return self.func("LN", expression.this)
1237
1238            return super().log_sql(expression)
1239
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)).

            FLATTEN yields the fixed columns (seq, key, path, index, value, this),
            so a table alias carrying those column names is attached or synthesized.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # The first aliased column (if any) names FLATTEN's "value" column.
            unnest_alias_columns = unnest_alias.columns if unnest_alias else []
            value = seq_get(unnest_alias_columns, 0) or exp.to_identifier("value")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # WITH OFFSET maps onto FLATTEN's "index" column.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                value,
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            table_input = self.sql(expression.expressions[0])
            if not table_input.startswith("INPUT =>"):
                table_input = f"INPUT => {table_input}"

            explode = f"TABLE(FLATTEN({table_input}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            # Outside FROM/JOIN position, project the value column explicitly.
            value = "" if isinstance(expression.parent, (exp.From, exp.Join)) else f"{value} FROM "

            return f"{value}{explode}{alias}"
1271
1272        def show_sql(self, expression: exp.Show) -> str:
1273            terse = "TERSE " if expression.args.get("terse") else ""
1274            history = " HISTORY" if expression.args.get("history") else ""
1275            like = self.sql(expression, "like")
1276            like = f" LIKE {like}" if like else ""
1277
1278            scope = self.sql(expression, "scope")
1279            scope = f" {scope}" if scope else ""
1280
1281            scope_kind = self.sql(expression, "scope_kind")
1282            if scope_kind:
1283                scope_kind = f" IN {scope_kind}"
1284
1285            starts_with = self.sql(expression, "starts_with")
1286            if starts_with:
1287                starts_with = f" STARTS WITH {starts_with}"
1288
1289            limit = self.sql(expression, "limit")
1290
1291            from_ = self.sql(expression, "from")
1292            if from_:
1293                from_ = f" FROM {from_}"
1294
1295            privileges = self.expressions(expression, key="privileges", flat=True)
1296            privileges = f" WITH PRIVILEGES {privileges}" if privileges else ""
1297
1298            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}{privileges}"
1299
1300        def describe_sql(self, expression: exp.Describe) -> str:
1301            # Default to table if kind is unknown
1302            kind_value = expression.args.get("kind") or "TABLE"
1303            kind = f" {kind_value}" if kind_value else ""
1304            this = f" {self.sql(expression, 'this')}"
1305            expressions = self.expressions(expression, flat=True)
1306            expressions = f" {expressions}" if expressions else ""
1307            return f"DESCRIBE{kind}{this}{expressions}"
1308
1309        def generatedasidentitycolumnconstraint_sql(
1310            self, expression: exp.GeneratedAsIdentityColumnConstraint
1311        ) -> str:
1312            start = expression.args.get("start")
1313            start = f" START {start}" if start else ""
1314            increment = expression.args.get("increment")
1315            increment = f" INCREMENT {increment}" if increment else ""
1316            return f"AUTOINCREMENT{start}{increment}"
1317
1318        def cluster_sql(self, expression: exp.Cluster) -> str:
1319            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1320
1321        def struct_sql(self, expression: exp.Struct) -> str:
1322            keys = []
1323            values = []
1324
1325            for i, e in enumerate(expression.expressions):
1326                if isinstance(e, exp.PropertyEQ):
1327                    keys.append(
1328                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1329                    )
1330                    values.append(e.expression)
1331                else:
1332                    keys.append(exp.Literal.string(f"_{i}"))
1333                    values.append(e)
1334
1335            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1336
1337        @unsupported_args("weight", "accuracy")
1338        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1339            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1340
1341        def alterset_sql(self, expression: exp.AlterSet) -> str:
1342            exprs = self.expressions(expression, flat=True)
1343            exprs = f" {exprs}" if exprs else ""
1344            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1345            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1346            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1347            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1348            tag = self.expressions(expression, key="tag", flat=True)
1349            tag = f" TAG {tag}" if tag else ""
1350
1351            return f"SET{exprs}{file_format}{copy_options}{tag}"
1352
1353        def strtotime_sql(self, expression: exp.StrToTime):
1354            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1355            return self.func(
1356                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1357            )
1358
1359        def timestampsub_sql(self, expression: exp.TimestampSub):
1360            return self.sql(
1361                exp.TimestampAdd(
1362                    this=expression.this,
1363                    expression=expression.expression * -1,
1364                    unit=expression.unit,
1365                )
1366            )
1367
1368        def jsonextract_sql(self, expression: exp.JSONExtract):
1369            this = expression.this
1370
1371            # JSON strings are valid coming from other dialects such as BQ
1372            return self.func(
1373                "GET_PATH",
1374                exp.ParseJSON(this=this) if this.is_string else this,
1375                expression.expression,
1376            )
1377
1378        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1379            this = expression.this
1380            if not isinstance(this, exp.TsOrDsToTimestamp):
1381                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1382
1383            return self.func("TO_CHAR", this, self.format_time(expression))
1384
        def datesub_sql(self, expression: exp.DateSub) -> str:
            """Render date subtraction as DATEADD with a negated count.

            Mutates the AST in place, replacing the subtracted count with its
            negation before delegating to the DATEADD renderer.
            """
            value = expression.expression
            if value:
                value.replace(value * (-1))
            else:
                self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")

            return date_delta_sql("DATEADD")(self, expression)
1393
1394        def select_sql(self, expression: exp.Select) -> str:
1395            limit = expression.args.get("limit")
1396            offset = expression.args.get("offset")
1397            if offset and not limit:
1398                expression.limit(exp.Null(), copy=False)
1399            return super().select_sql(expression)
1400
        def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
            """Render the createable part of CREATE, special-casing materialized views.

            For materialized views, COPY GRANTS is located *before* the columns list.
            This is in contrast to normal views where COPY GRANTS is located *after*
            the columns list. We default CopyGrantsProperty to POST_SCHEMA, which
            means we need to output it POST_NAME if a materialized view is detected.

            ref: https://docs.snowflake.com/en/sql-reference/sql/create-materialized-view#syntax
            ref: https://docs.snowflake.com/en/sql-reference/sql/create-view#syntax
            """
            is_materialized = expression.find(exp.MaterializedProperty)
            copy_grants_property = expression.find(exp.CopyGrantsProperty)

            if expression.kind == "VIEW" and is_materialized and copy_grants_property:
                # Remove COPY GRANTS from the POST_SCHEMA bucket so it isn't emitted twice.
                post_schema_properties = locations[exp.Properties.Location.POST_SCHEMA]
                post_schema_properties.pop(post_schema_properties.index(copy_grants_property))

                this_name = self.sql(expression.this, "this")
                copy_grants = self.sql(copy_grants_property)
                this_schema = self.schema_columns_sql(expression.this)
                this_schema = f"{self.sep()}{this_schema}" if this_schema else ""

                return f"{this_name}{self.sep()}{copy_grants}{this_schema}"

            return super().createable_sql(expression, locations)
1422
        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
            """Render ARRAY_AGG, moving an inner ORDER BY into a WITHIN GROUP clause."""
            this = expression.this

            # If an ORDER BY clause is present, we need to remove it from ARRAY_AGG
            # and add it later as part of the WITHIN GROUP clause
            order = this if isinstance(this, exp.Order) else None
            if order:
                expression.set("this", order.this.pop())

            expr_sql = super().arrayagg_sql(expression)

            if order:
                expr_sql = self.sql(exp.WithinGroup(this=expr_sql, expression=order))

            return expr_sql
1438
        def array_sql(self, expression: exp.Array) -> str:
            """Render an ARRAY literal; a SELECT AS STRUCT argument is rewritten to
            a subquery aggregating OBJECT_CONSTRUCT rows with ARRAY_AGG."""
            expressions = expression.expressions

            first_expr = seq_get(expressions, 0)
            if isinstance(first_expr, exp.Select):
                # SELECT AS STRUCT foo AS alias_foo -> ARRAY_AGG(OBJECT_CONSTRUCT('alias_foo', foo))
                if first_expr.text("kind").upper() == "STRUCT":
                    object_construct_args = []
                    for expr in first_expr.expressions:
                        # Alias case: SELECT AS STRUCT foo AS alias_foo -> OBJECT_CONSTRUCT('alias_foo', foo)
                        # Column case: SELECT AS STRUCT foo -> OBJECT_CONSTRUCT('foo', foo)
                        name = expr.this if isinstance(expr, exp.Alias) else expr

                        object_construct_args.extend([exp.Literal.string(expr.alias_or_name), name])

                    array_agg = exp.ArrayAgg(
                        this=_build_object_construct(args=object_construct_args)
                    )

                    # Mutate the SELECT in place: drop the STRUCT kind and replace
                    # its projections with the single aggregate.
                    first_expr.set("kind", None)
                    first_expr.set("expressions", [array_agg])

                    return self.sql(first_expr.subquery())

            return inline_array_sql(self, expression)
class Snowflake(sqlglot.dialects.dialect.Dialect):
class Snowflake(Dialect):
    """Snowflake dialect settings: identifier normalization, time formats
    and date-part aliases."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    # Unquoted identifiers normalize to uppercase.
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    # TABLESAMPLE (n) is interpreted as a percentage, not a row count.
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    # NOTE(review): None presumably means "unspecified / dialect default"
    # for ARRAY_AGG null handling — confirm against the Dialect base class.
    ARRAY_AGG_INCLUDES_NULLS = None

    # Snowflake time-format tokens mapped to strftime-style codes; upper-
    # and lowercase spellings are listed separately.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        # NOTE(review): lowercase "dd" maps to non-padded "%-d" while "DD" is
        # zero-padded "%d"; likewise "dy" -> "%w" vs "DY" -> "%a". Confirm the
        # asymmetry is intentional.
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF6": "%f",
        "ff6": "%f",
    }

    # Extend the shared date-part aliases: Snowflake spells ISOWEEK as WEEKISO.
    DATE_PART_MAPPING = {
        **Dialect.DATE_PART_MAPPING,
        "ISOWEEK": "WEEKISO",
    }
 380
 381    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 382        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 383        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 384        if (
 385            isinstance(expression, exp.Identifier)
 386            and isinstance(expression.parent, exp.Table)
 387            and expression.name.lower() == "dual"
 388        ):
 389            return expression  # type: ignore
 390
 391        return super().quote_identifier(expression, identify=identify)
 392
    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
        # Snowflake JSON paths do not use "$"; drop it from a copy of the
        # inherited token table so the parent class mapping is not mutated.
        SINGLE_TOKENS = jsonpath.JSONPathTokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("$")
 396
    class Parser(parser.Parser):
        """Snowflake-specific parsing tables and flags."""

        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # ":" performs VARIANT extraction (a:b) rather than slicing.
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        # WINDOW may alias a table; MATCH_CONDITION may not.
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        COLON_PLACEHOLDER_TOKENS = ID_VAR_TOKENS | {TokenType.NUMBER}

        # Function-name -> AST builder overrides for Snowflake built-ins.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # Snowflake's argument order is (array, value); AST is (value, array).
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
            "BIT_XOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
            "BITOR": _build_bitwise(exp.BitwiseOr, "BITOR"),
            "BIT_OR": _build_bitwise(exp.BitwiseOr, "BITOR"),
            "BITSHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BITSHIFTLEFT"),
            "BIT_SHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BIT_SHIFTLEFT"),
            "BITSHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BITSHIFTRIGHT"),
            "BIT_SHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BIT_SHIFTRIGHT"),
            "BOOLXOR": _build_bitwise(exp.Xor, "BOOLXOR"),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "HEX_DECODE_BINARY": exp.Unhex.from_arg_list,
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            # LEN/LENGTH count bytes for binary input in Snowflake.
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TABLE": lambda args: exp.TableFromRows(this=seq_get(args, 0)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIME": _build_datetime("TRY_TO_TIME", exp.DataType.Type.TIME, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_CHAR": build_timetostr_or_tochar,
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
            "LISTAGG": lambda self: self._parse_string_agg(),
        }
        # TRIM is parsed as a regular function in Snowflake.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] name [, name ...]
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.GET: lambda self: self._parse_get(),
            TokenType.PUT: lambda self: self._parse_put(),
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "CREDENTIALS": lambda self: self._parse_credentials_property(),
            "FILE_FORMAT": lambda self: self._parse_file_format_property(),
            "LOCATION": lambda self: self._parse_location_property(),
            "TAG": lambda self: self._parse_tag(),
            "USING": lambda self: self._match_text_seq("TEMPLATE")
            and self.expression(exp.UsingTemplateProperty, this=self._parse_statement()),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW <kind> sub-command dispatch table.
        SHOW_PARSERS = {
            "DATABASES": _show_parser("DATABASES"),
            "TERSE DATABASES": _show_parser("DATABASES"),
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "STAGES": _show_parser("STAGES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
            "FILE FORMATS": _show_parser("FILE FORMATS"),
            "FUNCTIONS": _show_parser("FUNCTIONS"),
            "PROCEDURES": _show_parser("PROCEDURES"),
            "WAREHOUSES": _show_parser("WAREHOUSES"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single tokens that may appear inside a staged-file path (@stage/...).
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default column names produced by LATERAL FLATTEN.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose unqualified scope is a schema rather than a table.
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose target is a bare identifier, not a table reference.
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            # Typed lambda params arrive as Casts; unwrap them to plain names.
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 601
 602        def _parse_use(self) -> exp.Use:
 603            if self._match_text_seq("SECONDARY", "ROLES"):
 604                this = self._match_texts(("ALL", "NONE")) and exp.var(self._prev.text.upper())
 605                roles = None if this else self._parse_csv(lambda: self._parse_table(schema=False))
 606                return self.expression(
 607                    exp.Use, kind="SECONDARY ROLES", this=this, expressions=roles
 608                )
 609
 610            return super()._parse_use()
 611
        def _negate_range(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Negate a range expression, special-casing IN with a subquery."""
            if not this:
                return this

            query = this.args.get("query")
            if isinstance(this, exp.In) and isinstance(query, exp.Query):
                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
                # which can produce different results (most likely a SnowFlake bug).
                #
                # https://docs.snowflake.com/en/sql-reference/functions/in
                # Context: https://github.com/tobymao/sqlglot/issues/3890
                return self.expression(
                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
                )

            return self.expression(exp.Not, this=this)
 631
        def _parse_tag(self) -> exp.Tags:
            """Parse a parenthesized, comma-separated TAG property list."""
            return self.expression(
                exp.Tags,
                expressions=self._parse_wrapped_csv(self._parse_property),
            )
 637
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse WITH-style column constraints: MASKING POLICY,
            PROJECTION POLICY and TAG. Returns None if nothing matches."""
            # WITH is optional: when dispatched from MASKING/PROJECTION/TAG
            # directly, step back one token so the sequences below match.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Policy names may be qualified; flatten a Column into a Dot chain.
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return None
 660
        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
            """Parse a WITH property, handling Snowflake's WITH TAG (...) form."""
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return super()._parse_with_property()
 666
        def _parse_create(self) -> exp.Create | exp.Command:
            """Parse CREATE; for non-table creatables (e.g. TAG, WAREHOUSE) the
            target is a bare identifier rather than a table reference."""
            expression = super()._parse_create()
            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
                # Replace the Table node with the enclosed Identifier
                expression.this.replace(expression.this.this)

            return expression
 674
 675        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 676        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 677        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 678            this = self._parse_var() or self._parse_type()
 679
 680            if not this:
 681                return None
 682
 683            self._match(TokenType.COMMA)
 684            expression = self._parse_bitwise()
 685            this = map_date_part(this)
 686            name = this.name.upper()
 687
 688            if name.startswith("EPOCH"):
 689                if name == "EPOCH_MILLISECOND":
 690                    scale = 10**3
 691                elif name == "EPOCH_MICROSECOND":
 692                    scale = 10**6
 693                elif name == "EPOCH_NANOSECOND":
 694                    scale = 10**9
 695                else:
 696                    scale = None
 697
 698                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 699                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 700
 701                if scale:
 702                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 703
 704                return to_unix
 705
 706            return self.expression(exp.Extract, this=this, expression=expression)
 707
        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse one key-value entry inside a bracket/object construct."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 716
        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL; FLATTEN gets its implicit output columns attached
            (aliased `_flattened` when the query supplies no alias)."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                # FLATTEN always produces the fixed SEQ/KEY/PATH/INDEX/VALUE/THIS columns.
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral
 731
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including staged-file references
            (@stage or a quoted path) with optional FILE_FORMAT/PATTERN options."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional parenthesized options: (FILE_FORMAT => ..., PATTERN => ...)
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # Either a quoted format name or a format object reference.
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 765
        def _parse_table(
            self,
            schema: bool = False,
            joins: bool = False,
            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
            parse_bracket: bool = False,
            is_db_reference: bool = False,
            parse_partition: bool = False,
        ) -> t.Optional[exp.Expression]:
            """Parse a table; a TABLE(...) call is unwrapped into the inner
            TableFromRows node, carrying over the outer Table's arguments."""
            table = super()._parse_table(
                schema=schema,
                joins=joins,
                alias_tokens=alias_tokens,
                parse_bracket=parse_bracket,
                is_db_reference=is_db_reference,
                parse_partition=parse_partition,
            )
            if isinstance(table, exp.Table) and isinstance(table.this, exp.TableFromRows):
                table_from_rows = table.this
                # Copy every outer Table arg (alias, joins, etc.) except "this".
                for arg in exp.TableFromRows.arg_types:
                    if arg != "this":
                        table_from_rows.set(arg, table.args.get(arg))

                table = table_from_rows

            return table
 792
        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            """Parse an identifier, supporting Snowflake's IDENTIFIER(<name or
            string>) indirection, which is kept as an Anonymous function node."""
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 807
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a SHOW statement (after the object kind),
            including TERSE, HISTORY, LIKE, IN <scope>, STARTS WITH, LIMIT,
            FROM and WITH PRIVILEGES clauses."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_text_seq("CLASS"):
                    scope_kind = "CLASS"
                    scope = self._parse_table_parts()
                elif self._match_text_seq("APPLICATION"):
                    scope_kind = "APPLICATION"
                    if self._match_text_seq("PACKAGE"):
                        scope_kind += " PACKAGE"
                    scope = self._parse_table_parts()
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare IN <name>: the scope kind depends on the SHOW kind.
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                    "privileges": self._match_text_seq("WITH", "PRIVILEGES")
                    and self._parse_csv(lambda: self._parse_var(any_token=True, upper=True)),
                },
            )
 855
 856        def _parse_put(self) -> exp.Put | exp.Command:
 857            if self._curr.token_type != TokenType.STRING:
 858                return self._parse_as_command(self._prev)
 859
 860            return self.expression(
 861                exp.Put,
 862                this=self._parse_string(),
 863                target=self._parse_location_path(),
 864                properties=self._parse_properties(),
 865            )
 866
 867        def _parse_get(self) -> t.Optional[exp.Expression]:
 868            start = self._prev
 869
 870            # If we detect GET( then we need to parse a function, not a statement
 871            if self._match(TokenType.L_PAREN):
 872                self._retreat(self._index - 2)
 873                return self._parse_expression()
 874
 875            target = self._parse_location_path()
 876
 877            # Parse as command if unquoted file path
 878            if self._curr.token_type == TokenType.URI_START:
 879                return self._parse_as_command(start)
 880
 881            return self.expression(
 882                exp.Get,
 883                this=self._parse_string(),
 884                target=target,
 885                properties=self._parse_properties(),
 886            )
 887
        def _parse_location_property(self) -> exp.LocationProperty:
            """Parse LOCATION [=] <stage path>."""
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 891
 892        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 893            # Parse either a subquery or a staged file
 894            return (
 895                self._parse_select(table=True, parse_subquery_alias=False)
 896                if self._match(TokenType.L_PAREN, advance=False)
 897                else self._parse_table_parts()
 898            )
 899
        def _parse_location_path(self) -> exp.Var:
            """Consume a staged-file path (e.g. @stage/dir/file) verbatim and
            return it as a single Var node."""
            start = self._curr
            self._advance_any(ignore_reserved=True)

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                self._advance_any(ignore_reserved=True)

            return exp.var(self._find_sql(start, self._prev))
 913
 914        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 915            this = super()._parse_lambda_arg()
 916
 917            if not this:
 918                return this
 919
 920            typ = self._parse_types()
 921
 922            if typ:
 923                return self.expression(exp.Cast, this=this, to=typ)
 924
 925            return this
 926
        def _parse_foreign_key(self) -> exp.ForeignKey:
            """Parse a FOREIGN KEY constraint, inline or out-of-line."""
            # inlineFK, the REFERENCES columns are implied
            if self._match(TokenType.REFERENCES, advance=False):
                return self.expression(exp.ForeignKey)

            # outoflineFK, explicitly names the columns
            return super()._parse_foreign_key()
 934
        def _parse_file_format_property(self) -> exp.FileFormatProperty:
            """Parse FILE_FORMAT [=] (<options>) or FILE_FORMAT [=] <name>."""
            self._match(TokenType.EQ)
            if self._match(TokenType.L_PAREN, advance=False):
                expressions = self._parse_wrapped_options()
            else:
                # A bare (possibly qualified) format name.
                expressions = [self._parse_format_name()]

            return self.expression(
                exp.FileFormatProperty,
                expressions=expressions,
            )
 946
        def _parse_credentials_property(self) -> exp.CredentialsProperty:
            """Parse CREDENTIALS (<key = value, ...>)."""
            return self.expression(
                exp.CredentialsProperty,
                expressions=self._parse_wrapped_options(),
            )
 952
    class Tokenizer(tokens.Tokenizer):
        """Snowflake-specific tokenizer settings."""

        # Both backslash and a doubled single quote escape inside strings.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ dollar-quoted raw strings.
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "FILE://": TokenType.URI_START,
            "BYTEINT": TokenType.INT,
            "EXCLUDE": TokenType.EXCEPT,
            "FILE FORMAT": TokenType.FILE_FORMAT,
            "GET": TokenType.GET,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.PUT,
            # Stage file-management commands are passed through as raw commands.
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STAGE": TokenType.STAGE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no Oracle-style /*+ ... */ hint comments.
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # "$" introduces session/bind parameters.
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed into an AST instead of being treated as a raw command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 997
    class Generator(generator.Generator):
        """SQL generator for the Snowflake dialect.

        Overrides the base generator's feature flags, the expression-to-SQL
        TRANSFORMS mapping, and a number of ``*_sql`` methods so that sqlglot
        ASTs render as valid Snowflake SQL.
        """

        # Snowflake uses `$` to introduce parameters.
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells `* EXCEPT (...)` as `* EXCLUDE (...)`.
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"

        # Maps sqlglot expression classes to their Snowflake SQL renderings,
        # mostly simple function renames plus a few argument reorderings.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array), i.e. reversed args.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.ArrayIntersect: rename_func("ARRAY_INTERSECTION"),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseOr: rename_func("BITOR"),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.BitwiseLeftShift: rename_func("BITSHIFTLEFT"),
            exp.BitwiseRightShift: rename_func("BITSHIFTRIGHT"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfWeekIso: rename_func("DAYOFWEEKISO"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: lambda self, e: self.func(
                "DATE_PART", map_date_part(e.this, self.dialect), e.expression
            ),
            exp.FileFormatProperty: lambda self,
            e: f"FILE_FORMAT=({self.expressions(e, 'expressions', sep=' ')})",
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's end is exclusive, so bump it by one to keep
            # GenerateSeries' inclusive-end semantics.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: lambda self, e: groupconcat_sql(self, e, sep=""),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            # The JSON path root (`$`) is implicit in Snowflake paths.
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
            exp.LocationProperty: lambda self, e: f"LOCATION={self.sql(e, 'this')}",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            # Rewrite SELECT features Snowflake lacks into equivalent constructs
            # before generating SQL.
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_window_clause,
                    transforms.eliminate_distinct_on,
                    transforms.explode_projection_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.EndsWith: rename_func("ENDSWITH"),
            exp.StrPosition: lambda self, e: strposition_sql(
                self, e, func_name="CHARINDEX", supports_position=True
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.StPoint: rename_func("ST_MAKEPOINT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            # TIMESTAMPDIFF(unit, start, end): operands are swapped vs the AST.
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.TsOrDsToTime: lambda self, e: self.func(
                "TRY_TO_TIME" if e.args.get("safe") else "TO_TIME", e.this, self.format_time(e)
            ),
            exp.Unhex: rename_func("HEX_DECODE_BINARY"),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        # JSON path components this dialect can render natively.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
            exp.DataType.Type.BIGDECIMAL: "DOUBLE",
        }

        TOKEN_MAPPING = {
            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.CredentialsProperty: exp.Properties.Location.POST_WITH,
            exp.LocationProperty: exp.Properties.Location.POST_WITH,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expressions that prevent rendering VALUES rows in table form
        # (see values_sql below).
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS = (exp.ArrayAgg,)

        def with_properties(self, properties: exp.Properties) -> str:
            """Render properties unwrapped and space-separated."""
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Render VALUES, disabling table form when any row contains an
            expression from UNSUPPORTED_VALUES_EXPRESSIONS."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Collapse structured types carrying typed (unnamed) fields into a
            bare OBJECT, since those fields can't be expressed in Snowflake."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER(value[, format][, precision][, scale])."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, converting a `milli` argument into
            the nanoseconds argument the function expects (1 ms = 1_000_000 ns)."""
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            """Render spatial casts via TO_GEOGRAPHY / TO_GEOMETRY; defer to the
            base CAST rendering otherwise."""
            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
                return self.func("TO_GEOGRAPHY", expression.this)
            if expression.is_type(exp.DataType.Type.GEOMETRY):
                return self.func("TO_GEOMETRY", expression.this)

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST only when the value is (or may be) a string;
            otherwise degrade to a plain CAST."""
            value = expression.this

            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value, dialect=self.dialect)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render a base-less LOG as the natural logarithm LN."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Emulate UNNEST with TABLE(FLATTEN(INPUT => ...)).

            FLATTEN exposes six output columns, so the alias is padded with
            placeholder names (seq, key, path, index/offset, value, this) and the
            caller's column alias is mapped onto the VALUE position.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            unnest_alias_columns = unnest_alias.columns if unnest_alias else []
            value = seq_get(unnest_alias_columns, 0) or exp.to_identifier("value")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # A requested UNNEST offset takes the INDEX column's slot.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                value,
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            table_input = self.sql(expression.expressions[0])
            if not table_input.startswith("INPUT =>"):
                table_input = f"INPUT => {table_input}"

            explode = f"TABLE(FLATTEN({table_input}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            # Outside FROM/JOIN the unnested value must be selected explicitly.
            value = "" if isinstance(expression.parent, (exp.From, exp.Join)) else f"{value} FROM "

            return f"{value}{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a SHOW statement with its optional Snowflake modifiers
            (TERSE, HISTORY, LIKE, IN <scope>, STARTS WITH, LIMIT, FROM,
            WITH PRIVILEGES)."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            privileges = self.expressions(expression, key="privileges", flat=True)
            privileges = f" WITH PRIVILEGES {privileges}" if privileges else ""

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}{privileges}"

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE, defaulting the object kind to TABLE."""
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render identity columns as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render CLUSTER BY with a parenthesized expression list."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(k1, v1, k2, v2, ...).

            Unnamed fields get positional keys '_0', '_1', ... so every value
            still has a key.
            """
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

        @unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render APPROX_PERCENTILE(value, quantile); weight/accuracy args
            are flagged as unsupported."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render ALTER ... SET, including STAGE_FILE_FORMAT,
            STAGE_COPY_OPTIONS and TAG clauses when present."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"

        def strtotime_sql(self, expression: exp.StrToTime):
            """Render TO_TIMESTAMP (or TRY_TO_TIMESTAMP when safe) with the
            dialect-formatted time string."""
            safe_prefix = "TRY_" if expression.args.get("safe") else ""
            return self.func(
                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
            )

        def timestampsub_sql(self, expression: exp.TimestampSub):
            """Rewrite timestamp subtraction as TIMESTAMPADD of a negated amount."""
            return self.sql(
                exp.TimestampAdd(
                    this=expression.this,
                    expression=expression.expression * -1,
                    unit=expression.unit,
                )
            )

        def jsonextract_sql(self, expression: exp.JSONExtract):
            """Render JSON extraction via GET_PATH, parsing string inputs first."""
            this = expression.this

            # JSON strings are valid coming from other dialects such as BQ
            return self.func(
                "GET_PATH",
                exp.ParseJSON(this=this) if this.is_string else this,
                expression.expression,
            )

        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
            """Render TO_CHAR over a value cast to TIMESTAMP (unless it is
            already a TsOrDsToTimestamp)."""
            this = expression.this
            if not isinstance(this, exp.TsOrDsToTimestamp):
                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)

            return self.func("TO_CHAR", this, self.format_time(expression))

        def datesub_sql(self, expression: exp.DateSub) -> str:
            """Rewrite date subtraction as DATEADD with a negated count."""
            value = expression.expression
            if value:
                value.replace(value * (-1))
            else:
                self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")

            return date_delta_sql("DATEADD")(self, expression)

        def select_sql(self, expression: exp.Select) -> str:
            """Ensure a SELECT with an OFFSET but no LIMIT gets an explicit
            LIMIT NULL (a bare OFFSET is rejected otherwise)."""
            limit = expression.args.get("limit")
            offset = expression.args.get("offset")
            if offset and not limit:
                expression.limit(exp.Null(), copy=False)
            return super().select_sql(expression)

        def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
            """Render the createable part of CREATE, relocating COPY GRANTS for
            materialized views (it precedes the column list there)."""
            is_materialized = expression.find(exp.MaterializedProperty)
            copy_grants_property = expression.find(exp.CopyGrantsProperty)

            if expression.kind == "VIEW" and is_materialized and copy_grants_property:
                # For materialized views, COPY GRANTS is located *before* the columns list
                # This is in contrast to normal views where COPY GRANTS is located *after* the columns list
                # We default CopyGrantsProperty to POST_SCHEMA which means we need to output it POST_NAME if a materialized view is detected
                # ref: https://docs.snowflake.com/en/sql-reference/sql/create-materialized-view#syntax
                # ref: https://docs.snowflake.com/en/sql-reference/sql/create-view#syntax
                post_schema_properties = locations[exp.Properties.Location.POST_SCHEMA]
                post_schema_properties.pop(post_schema_properties.index(copy_grants_property))

                this_name = self.sql(expression.this, "this")
                copy_grants = self.sql(copy_grants_property)
                this_schema = self.schema_columns_sql(expression.this)
                this_schema = f"{self.sep()}{this_schema}" if this_schema else ""

                return f"{this_name}{self.sep()}{copy_grants}{this_schema}"

            return super().createable_sql(expression, locations)

        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
            """Render ARRAY_AGG, moving an inner ORDER BY into a trailing
            WITHIN GROUP clause."""
            this = expression.this

            # If an ORDER BY clause is present, we need to remove it from ARRAY_AGG
            # and add it later as part of the WITHIN GROUP clause
            order = this if isinstance(this, exp.Order) else None
            if order:
                expression.set("this", order.this.pop())

            expr_sql = super().arrayagg_sql(expression)

            if order:
                expr_sql = self.sql(exp.WithinGroup(this=expr_sql, expression=order))

            return expr_sql

        def array_sql(self, expression: exp.Array) -> str:
            """Render an array literal; ARRAY(SELECT AS STRUCT ...) becomes a
            subquery over ARRAY_AGG(OBJECT_CONSTRUCT(...))."""
            expressions = expression.expressions

            first_expr = seq_get(expressions, 0)
            if isinstance(first_expr, exp.Select):
                # SELECT AS STRUCT foo AS alias_foo -> ARRAY_AGG(OBJECT_CONSTRUCT('alias_foo', foo))
                if first_expr.text("kind").upper() == "STRUCT":
                    object_construct_args = []
                    for expr in first_expr.expressions:
                        # Alias case: SELECT AS STRUCT foo AS alias_foo -> OBJECT_CONSTRUCT('alias_foo', foo)
                        # Column case: SELECT AS STRUCT foo -> OBJECT_CONSTRUCT('foo', foo)
                        name = expr.this if isinstance(expr, exp.Alias) else expr

                        object_construct_args.extend([exp.Literal.string(expr.alias_or_name), name])

                    array_agg = exp.ArrayAgg(
                        this=_build_object_construct(args=object_construct_args)
                    )

                    first_expr.set("kind", None)
                    first_expr.set("expressions", [array_agg])

                    return self.sql(first_expr.subquery())

            return inline_array_sql(self, expression)
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by comma or whitespace

ARRAY_AGG_INCLUDES_NULLS: Optional[bool] = None

Whether ArrayAgg needs to filter NULL values.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

DATE_PART_MAPPING = {'Y': 'YEAR', 'YY': 'YEAR', 'YYY': 'YEAR', 'YYYY': 'YEAR', 'YR': 'YEAR', 'YEARS': 'YEAR', 'YRS': 'YEAR', 'MM': 'MONTH', 'MON': 'MONTH', 'MONS': 'MONTH', 'MONTHS': 'MONTH', 'D': 'DAY', 'DD': 'DAY', 'DAYS': 'DAY', 'DAYOFMONTH': 'DAY', 'DAY OF WEEK': 'DAYOFWEEK', 'WEEKDAY': 'DAYOFWEEK', 'DOW': 'DAYOFWEEK', 'DW': 'DAYOFWEEK', 'WEEKDAY_ISO': 'DAYOFWEEKISO', 'DOW_ISO': 'DAYOFWEEKISO', 'DW_ISO': 'DAYOFWEEKISO', 'DAY OF YEAR': 'DAYOFYEAR', 'DOY': 'DAYOFYEAR', 'DY': 'DAYOFYEAR', 'W': 'WEEK', 'WK': 'WEEK', 'WEEKOFYEAR': 'WEEK', 'WOY': 'WEEK', 'WY': 'WEEK', 'WEEK_ISO': 'WEEKISO', 'WEEKOFYEARISO': 'WEEKISO', 'WEEKOFYEAR_ISO': 'WEEKISO', 'Q': 'QUARTER', 'QTR': 'QUARTER', 'QTRS': 'QUARTER', 'QUARTERS': 'QUARTER', 'H': 'HOUR', 'HH': 'HOUR', 'HR': 'HOUR', 'HOURS': 'HOUR', 'HRS': 'HOUR', 'M': 'MINUTE', 'MI': 'MINUTE', 'MIN': 'MINUTE', 'MINUTES': 'MINUTE', 'MINS': 'MINUTE', 'S': 'SECOND', 'SEC': 'SECOND', 'SECONDS': 'SECOND', 'SECS': 'SECOND', 'MS': 'MILLISECOND', 'MSEC': 'MILLISECOND', 'MSECS': 'MILLISECOND', 'MSECOND': 'MILLISECOND', 'MSECONDS': 'MILLISECOND', 'MILLISEC': 'MILLISECOND', 'MILLISECS': 'MILLISECOND', 'MILLISECON': 'MILLISECOND', 'MILLISECONDS': 'MILLISECOND', 'US': 'MICROSECOND', 'USEC': 'MICROSECOND', 'USECS': 'MICROSECOND', 'MICROSEC': 'MICROSECOND', 'MICROSECS': 'MICROSECOND', 'USECOND': 'MICROSECOND', 'USECONDS': 'MICROSECOND', 'MICROSECONDS': 'MICROSECOND', 'NS': 'NANOSECOND', 'NSEC': 'NANOSECOND', 'NANOSEC': 'NANOSECOND', 'NSECOND': 'NANOSECOND', 'NSECONDS': 'NANOSECOND', 'NANOSECS': 'NANOSECOND', 'EPOCH_SECOND': 'EPOCH', 'EPOCH_SECONDS': 'EPOCH', 'EPOCH_MILLISECONDS': 'EPOCH_MILLISECOND', 'EPOCH_MICROSECONDS': 'EPOCH_MICROSECOND', 'EPOCH_NANOSECONDS': 'EPOCH_NANOSECOND', 'TZH': 'TIMEZONE_HOUR', 'TZM': 'TIMEZONE_MINUTE', 'DEC': 'DECADE', 'DECS': 'DECADE', 'DECADES': 'DECADE', 'MIL': 'MILLENIUM', 'MILS': 'MILLENIUM', 'MILLENIA': 'MILLENIUM', 'C': 'CENTURY', 'CENT': 'CENTURY', 'CENTS': 'CENTURY', 'CENTURIES': 'CENTURY', 'ISOWEEK': 'WEEKISO'}
def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
    def quote_identifier(self, expression: E, identify: bool = True) -> E:
        """Add quotes to *expression* if it is an Identifier, leaving the
        special table name DUAL unquoted.
        """
        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
        if (
            isinstance(expression, exp.Identifier)
            and isinstance(expression.parent, exp.Table)
            and expression.name.lower() == "dual"
        ):
            return expression  # type: ignore

        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. "\n" as two characters) to its unescaped version (e.g. the literal newline character).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'Snowflake.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {'6': {0: True}}}, 'f': {'f': {'6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {'6': {0: True}}}, 'f': {'f': {'6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {}
INVERSE_FORMAT_TRIE: Dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.JSONPathTokenizer(sqlglot.jsonpath.JSONPathTokenizer):
    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
        # Drop `$` from the JSON-path single tokens; in this dialect `$` is
        # lexed as a PARAMETER marker (see Tokenizer.SINGLE_TOKENS).
        SINGLE_TOKENS = jsonpath.JSONPathTokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("$")
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '-': <TokenType.DASH: 'DASH'>, '.': <TokenType.DOT: 'DOT'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '"': <TokenType.QUOTE: 'QUOTE'>, '*': <TokenType.STAR: 'STAR'>}
class Snowflake.Parser(sqlglot.parser.Parser):
397    class Parser(parser.Parser):
398        IDENTIFY_PIVOT_STRINGS = True
399        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
400        COLON_IS_VARIANT_EXTRACT = True
401
402        ID_VAR_TOKENS = {
403            *parser.Parser.ID_VAR_TOKENS,
404            TokenType.MATCH_CONDITION,
405        }
406
407        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
408        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
409
410        COLON_PLACEHOLDER_TOKENS = ID_VAR_TOKENS | {TokenType.NUMBER}
411
412        FUNCTIONS = {
413            **parser.Parser.FUNCTIONS,
414            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
415            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
416            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
417                this=seq_get(args, 1), expression=seq_get(args, 0)
418            ),
419            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
420            # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
421                start=seq_get(args, 0),
422                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
423                step=seq_get(args, 2),
424            ),
425            "BITXOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
426            "BIT_XOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
427            "BITOR": _build_bitwise(exp.BitwiseOr, "BITOR"),
428            "BIT_OR": _build_bitwise(exp.BitwiseOr, "BITOR"),
429            "BITSHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BITSHIFTLEFT"),
430            "BIT_SHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BIT_SHIFTLEFT"),
431            "BITSHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BITSHIFTRIGHT"),
432            "BIT_SHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BIT_SHIFTRIGHT"),
433            "BOOLXOR": _build_bitwise(exp.Xor, "BOOLXOR"),
434            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
435            "DATE_TRUNC": _date_trunc_to_time,
436            "DATEADD": _build_date_time_add(exp.DateAdd),
437            "DATEDIFF": _build_datediff,
438            "DIV0": _build_if_from_div0,
439            "EDITDISTANCE": lambda args: exp.Levenshtein(
440                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
441            ),
442            "FLATTEN": exp.Explode.from_arg_list,
443            "GET_PATH": lambda args, dialect: exp.JSONExtract(
444                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
445            ),
446            "HEX_DECODE_BINARY": exp.Unhex.from_arg_list,
447            "IFF": exp.If.from_arg_list,
448            "LAST_DAY": lambda args: exp.LastDay(
449                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
450            ),
451            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
452            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
453            "NULLIFZERO": _build_if_from_nullifzero,
454            "OBJECT_CONSTRUCT": _build_object_construct,
455            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
456            "REGEXP_REPLACE": _build_regexp_replace,
457            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
458            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
459            "RLIKE": exp.RegexpLike.from_arg_list,
460            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
461            "TABLE": lambda args: exp.TableFromRows(this=seq_get(args, 0)),
462            "TIMEADD": _build_date_time_add(exp.TimeAdd),
463            "TIMEDIFF": _build_datediff,
464            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
465            "TIMESTAMPDIFF": _build_datediff,
466            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
467            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
468            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
469            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
470            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
471            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
472            "TRY_TO_TIME": _build_datetime("TRY_TO_TIME", exp.DataType.Type.TIME, safe=True),
473            "TRY_TO_TIMESTAMP": _build_datetime(
474                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
475            ),
476            "TO_CHAR": build_timetostr_or_tochar,
477            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
478            "TO_NUMBER": lambda args: exp.ToNumber(
479                this=seq_get(args, 0),
480                format=seq_get(args, 1),
481                precision=seq_get(args, 2),
482                scale=seq_get(args, 3),
483            ),
484            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
485            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
486            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
487            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
488            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
489            "TO_VARCHAR": exp.ToChar.from_arg_list,
490            "ZEROIFNULL": _build_if_from_zeroifnull,
491        }
492
493        FUNCTION_PARSERS = {
494            **parser.Parser.FUNCTION_PARSERS,
495            "DATE_PART": lambda self: self._parse_date_part(),
496            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
497            "LISTAGG": lambda self: self._parse_string_agg(),
498        }
499        FUNCTION_PARSERS.pop("TRIM")
500
501        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
502
503        RANGE_PARSERS = {
504            **parser.Parser.RANGE_PARSERS,
505            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
506            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
507        }
508
509        ALTER_PARSERS = {
510            **parser.Parser.ALTER_PARSERS,
511            "UNSET": lambda self: self.expression(
512                exp.Set,
513                tag=self._match_text_seq("TAG"),
514                expressions=self._parse_csv(self._parse_id_var),
515                unset=True,
516            ),
517        }
518
519        STATEMENT_PARSERS = {
520            **parser.Parser.STATEMENT_PARSERS,
521            TokenType.GET: lambda self: self._parse_get(),
522            TokenType.PUT: lambda self: self._parse_put(),
523            TokenType.SHOW: lambda self: self._parse_show(),
524        }
525
526        PROPERTY_PARSERS = {
527            **parser.Parser.PROPERTY_PARSERS,
528            "CREDENTIALS": lambda self: self._parse_credentials_property(),
529            "FILE_FORMAT": lambda self: self._parse_file_format_property(),
530            "LOCATION": lambda self: self._parse_location_property(),
531            "TAG": lambda self: self._parse_tag(),
532            "USING": lambda self: self._match_text_seq("TEMPLATE")
533            and self.expression(exp.UsingTemplateProperty, this=self._parse_statement()),
534        }
535
536        TYPE_CONVERTERS = {
537            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
538            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
539        }
540
541        SHOW_PARSERS = {
542            "DATABASES": _show_parser("DATABASES"),
543            "TERSE DATABASES": _show_parser("DATABASES"),
544            "SCHEMAS": _show_parser("SCHEMAS"),
545            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
546            "OBJECTS": _show_parser("OBJECTS"),
547            "TERSE OBJECTS": _show_parser("OBJECTS"),
548            "TABLES": _show_parser("TABLES"),
549            "TERSE TABLES": _show_parser("TABLES"),
550            "VIEWS": _show_parser("VIEWS"),
551            "TERSE VIEWS": _show_parser("VIEWS"),
552            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
553            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
554            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
555            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
556            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
557            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
558            "SEQUENCES": _show_parser("SEQUENCES"),
559            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
560            "STAGES": _show_parser("STAGES"),
561            "COLUMNS": _show_parser("COLUMNS"),
562            "USERS": _show_parser("USERS"),
563            "TERSE USERS": _show_parser("USERS"),
564            "FILE FORMATS": _show_parser("FILE FORMATS"),
565            "FUNCTIONS": _show_parser("FUNCTIONS"),
566            "PROCEDURES": _show_parser("PROCEDURES"),
567            "WAREHOUSES": _show_parser("WAREHOUSES"),
568        }
569
570        CONSTRAINT_PARSERS = {
571            **parser.Parser.CONSTRAINT_PARSERS,
572            "WITH": lambda self: self._parse_with_constraint(),
573            "MASKING": lambda self: self._parse_with_constraint(),
574            "PROJECTION": lambda self: self._parse_with_constraint(),
575            "TAG": lambda self: self._parse_with_constraint(),
576        }
577
578        STAGED_FILE_SINGLE_TOKENS = {
579            TokenType.DOT,
580            TokenType.MOD,
581            TokenType.SLASH,
582        }
583
584        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
585
586        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
587
588        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}
589
590        LAMBDAS = {
591            **parser.Parser.LAMBDAS,
592            TokenType.ARROW: lambda self, expressions: self.expression(
593                exp.Lambda,
594                this=self._replace_lambda(
595                    self._parse_assignment(),
596                    expressions,
597                ),
598                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
599            ),
600        }
601
602        def _parse_use(self) -> exp.Use:
603            if self._match_text_seq("SECONDARY", "ROLES"):
604                this = self._match_texts(("ALL", "NONE")) and exp.var(self._prev.text.upper())
605                roles = None if this else self._parse_csv(lambda: self._parse_table(schema=False))
606                return self.expression(
607                    exp.Use, kind="SECONDARY ROLES", this=this, expressions=roles
608                )
609
610            return super()._parse_use()
611
612        def _negate_range(
613            self, this: t.Optional[exp.Expression] = None
614        ) -> t.Optional[exp.Expression]:
615            if not this:
616                return this
617
618            query = this.args.get("query")
619            if isinstance(this, exp.In) and isinstance(query, exp.Query):
620                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
621                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
622            # which can produce different results (most likely a Snowflake bug).
623                #
624                # https://docs.snowflake.com/en/sql-reference/functions/in
625                # Context: https://github.com/tobymao/sqlglot/issues/3890
626                return self.expression(
627                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
628                )
629
630            return self.expression(exp.Not, this=this)
631
632        def _parse_tag(self) -> exp.Tags:
633            return self.expression(
634                exp.Tags,
635                expressions=self._parse_wrapped_csv(self._parse_property),
636            )
637
638        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
639            if self._prev.token_type != TokenType.WITH:
640                self._retreat(self._index - 1)
641
642            if self._match_text_seq("MASKING", "POLICY"):
643                policy = self._parse_column()
644                return self.expression(
645                    exp.MaskingPolicyColumnConstraint,
646                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
647                    expressions=self._match(TokenType.USING)
648                    and self._parse_wrapped_csv(self._parse_id_var),
649                )
650            if self._match_text_seq("PROJECTION", "POLICY"):
651                policy = self._parse_column()
652                return self.expression(
653                    exp.ProjectionPolicyColumnConstraint,
654                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
655                )
656            if self._match(TokenType.TAG):
657                return self._parse_tag()
658
659            return None
660
661        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
662            if self._match(TokenType.TAG):
663                return self._parse_tag()
664
665            return super()._parse_with_property()
666
667        def _parse_create(self) -> exp.Create | exp.Command:
668            expression = super()._parse_create()
669            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
670                # Replace the Table node with the enclosed Identifier
671                expression.this.replace(expression.this.this)
672
673            return expression
674
675        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
676        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
677        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
678            this = self._parse_var() or self._parse_type()
679
680            if not this:
681                return None
682
683            self._match(TokenType.COMMA)
684            expression = self._parse_bitwise()
685            this = map_date_part(this)
686            name = this.name.upper()
687
688            if name.startswith("EPOCH"):
689                if name == "EPOCH_MILLISECOND":
690                    scale = 10**3
691                elif name == "EPOCH_MICROSECOND":
692                    scale = 10**6
693                elif name == "EPOCH_NANOSECOND":
694                    scale = 10**9
695                else:
696                    scale = None
697
698                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
699                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
700
701                if scale:
702                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
703
704                return to_unix
705
706            return self.expression(exp.Extract, this=this, expression=expression)
707
708        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
709            if is_map:
710                # Keys are strings in Snowflake's objects, see also:
711                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
712                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
713                return self._parse_slice(self._parse_string())
714
715            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
716
717        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
718            lateral = super()._parse_lateral()
719            if not lateral:
720                return lateral
721
722            if isinstance(lateral.this, exp.Explode):
723                table_alias = lateral.args.get("alias")
724                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
725                if table_alias and not table_alias.args.get("columns"):
726                    table_alias.set("columns", columns)
727                elif not table_alias:
728                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
729
730            return lateral
731
732        def _parse_table_parts(
733            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
734        ) -> exp.Table:
735            # https://docs.snowflake.com/en/user-guide/querying-stage
736            if self._match(TokenType.STRING, advance=False):
737                table = self._parse_string()
738            elif self._match_text_seq("@", advance=False):
739                table = self._parse_location_path()
740            else:
741                table = None
742
743            if table:
744                file_format = None
745                pattern = None
746
747                wrapped = self._match(TokenType.L_PAREN)
748                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
749                    if self._match_text_seq("FILE_FORMAT", "=>"):
750                        file_format = self._parse_string() or super()._parse_table_parts(
751                            is_db_reference=is_db_reference
752                        )
753                    elif self._match_text_seq("PATTERN", "=>"):
754                        pattern = self._parse_string()
755                    else:
756                        break
757
758                    self._match(TokenType.COMMA)
759
760                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
761            else:
762                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
763
764            return table
765
766        def _parse_table(
767            self,
768            schema: bool = False,
769            joins: bool = False,
770            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
771            parse_bracket: bool = False,
772            is_db_reference: bool = False,
773            parse_partition: bool = False,
774        ) -> t.Optional[exp.Expression]:
775            table = super()._parse_table(
776                schema=schema,
777                joins=joins,
778                alias_tokens=alias_tokens,
779                parse_bracket=parse_bracket,
780                is_db_reference=is_db_reference,
781                parse_partition=parse_partition,
782            )
783            if isinstance(table, exp.Table) and isinstance(table.this, exp.TableFromRows):
784                table_from_rows = table.this
785                for arg in exp.TableFromRows.arg_types:
786                    if arg != "this":
787                        table_from_rows.set(arg, table.args.get(arg))
788
789                table = table_from_rows
790
791            return table
792
793        def _parse_id_var(
794            self,
795            any_token: bool = True,
796            tokens: t.Optional[t.Collection[TokenType]] = None,
797        ) -> t.Optional[exp.Expression]:
798            if self._match_text_seq("IDENTIFIER", "("):
799                identifier = (
800                    super()._parse_id_var(any_token=any_token, tokens=tokens)
801                    or self._parse_string()
802                )
803                self._match_r_paren()
804                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
805
806            return super()._parse_id_var(any_token=any_token, tokens=tokens)
807
808        def _parse_show_snowflake(self, this: str) -> exp.Show:
809            scope = None
810            scope_kind = None
811
812        # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
813            # which is syntactically valid but has no effect on the output
814            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
815
816            history = self._match_text_seq("HISTORY")
817
818            like = self._parse_string() if self._match(TokenType.LIKE) else None
819
820            if self._match(TokenType.IN):
821                if self._match_text_seq("ACCOUNT"):
822                    scope_kind = "ACCOUNT"
823                elif self._match_text_seq("CLASS"):
824                    scope_kind = "CLASS"
825                    scope = self._parse_table_parts()
826                elif self._match_text_seq("APPLICATION"):
827                    scope_kind = "APPLICATION"
828                    if self._match_text_seq("PACKAGE"):
829                        scope_kind += " PACKAGE"
830                    scope = self._parse_table_parts()
831                elif self._match_set(self.DB_CREATABLES):
832                    scope_kind = self._prev.text.upper()
833                    if self._curr:
834                        scope = self._parse_table_parts()
835                elif self._curr:
836                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
837                    scope = self._parse_table_parts()
838
839            return self.expression(
840                exp.Show,
841                **{
842                    "terse": terse,
843                    "this": this,
844                    "history": history,
845                    "like": like,
846                    "scope": scope,
847                    "scope_kind": scope_kind,
848                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
849                    "limit": self._parse_limit(),
850                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
851                    "privileges": self._match_text_seq("WITH", "PRIVILEGES")
852                    and self._parse_csv(lambda: self._parse_var(any_token=True, upper=True)),
853                },
854            )
855
856        def _parse_put(self) -> exp.Put | exp.Command:
857            if self._curr.token_type != TokenType.STRING:
858                return self._parse_as_command(self._prev)
859
860            return self.expression(
861                exp.Put,
862                this=self._parse_string(),
863                target=self._parse_location_path(),
864                properties=self._parse_properties(),
865            )
866
867        def _parse_get(self) -> t.Optional[exp.Expression]:
868            start = self._prev
869
870            # If we detect GET( then we need to parse a function, not a statement
871            if self._match(TokenType.L_PAREN):
872                self._retreat(self._index - 2)
873                return self._parse_expression()
874
875            target = self._parse_location_path()
876
877            # Parse as command if unquoted file path
878            if self._curr.token_type == TokenType.URI_START:
879                return self._parse_as_command(start)
880
881            return self.expression(
882                exp.Get,
883                this=self._parse_string(),
884                target=target,
885                properties=self._parse_properties(),
886            )
887
888        def _parse_location_property(self) -> exp.LocationProperty:
889            self._match(TokenType.EQ)
890            return self.expression(exp.LocationProperty, this=self._parse_location_path())
891
892        def _parse_file_location(self) -> t.Optional[exp.Expression]:
893            # Parse either a subquery or a staged file
894            return (
895                self._parse_select(table=True, parse_subquery_alias=False)
896                if self._match(TokenType.L_PAREN, advance=False)
897                else self._parse_table_parts()
898            )
899
900        def _parse_location_path(self) -> exp.Var:
901            start = self._curr
902            self._advance_any(ignore_reserved=True)
903
904            # We avoid consuming a comma token because external tables like @foo and @bar
905            # can be joined in a query with a comma separator, as well as closing paren
906            # in case of subqueries
907            while self._is_connected() and not self._match_set(
908                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
909            ):
910                self._advance_any(ignore_reserved=True)
911
912            return exp.var(self._find_sql(start, self._prev))
913
914        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
915            this = super()._parse_lambda_arg()
916
917            if not this:
918                return this
919
920            typ = self._parse_types()
921
922            if typ:
923                return self.expression(exp.Cast, this=this, to=typ)
924
925            return this
926
927        def _parse_foreign_key(self) -> exp.ForeignKey:
928            # inline FK, the REFERENCES columns are implied
929            if self._match(TokenType.REFERENCES, advance=False):
930                return self.expression(exp.ForeignKey)
931
932            # out-of-line FK, explicitly names the columns
933            return super()._parse_foreign_key()
934
935        def _parse_file_format_property(self) -> exp.FileFormatProperty:
936            self._match(TokenType.EQ)
937            if self._match(TokenType.L_PAREN, advance=False):
938                expressions = self._parse_wrapped_options()
939            else:
940                expressions = [self._parse_format_name()]
941
942            return self.expression(
943                exp.FileFormatProperty,
944                expressions=expressions,
945            )
946
947        def _parse_credentials_property(self) -> exp.CredentialsProperty:
948            return self.expression(
949                exp.CredentialsProperty,
950                expressions=self._parse_wrapped_options(),
951            )

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_VARIANT_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.SHOW: 'SHOW'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.SINK: 'SINK'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.CHAR: 'CHAR'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MAP: 'MAP'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.APPLY: 'APPLY'>, <TokenType.RENAME: 'RENAME'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.UINT128: 'UINT128'>, <TokenType.STAGE: 'STAGE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.DIV: 'DIV'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.USE: 'USE'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.PUT: 'PUT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TIME: 'TIME'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.IS: 'IS'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.JSON: 'JSON'>, <TokenType.VAR: 'VAR'>, <TokenType.CUBE: 'CUBE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.INT128: 'INT128'>, <TokenType.CACHE: 'CACHE'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.UINT: 'UINT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.BIT: 'BIT'>, <TokenType.COPY: 'COPY'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.UTINYINT: 'UTINYINT'>, 
<TokenType.COMMAND: 'COMMAND'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.BLOB: 'BLOB'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.UUID: 'UUID'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.CASE: 'CASE'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ANY: 'ANY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.FINAL: 'FINAL'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.END: 'END'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.NEXT: 'NEXT'>, <TokenType.ROW: 'ROW'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.ASC: 'ASC'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.SEQUENCE: 'SEQUENCE'>, 
<TokenType.PRAGMA: 'PRAGMA'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.ANTI: 'ANTI'>, <TokenType.BINARY: 'BINARY'>, <TokenType.RING: 'RING'>, <TokenType.TAG: 'TAG'>, <TokenType.RANGE: 'RANGE'>, <TokenType.MODEL: 'MODEL'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.GET: 'GET'>, <TokenType.POINT: 'POINT'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.TOP: 'TOP'>, <TokenType.UINT256: 'UINT256'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ROWS: 'ROWS'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.FIRST: 'FIRST'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.TRUE: 'TRUE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ALL: 'ALL'>, <TokenType.KILL: 'KILL'>, <TokenType.LEFT: 'LEFT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.NULL: 'NULL'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.FULL: 'FULL'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.SET: 'SET'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.LIST: 'LIST'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.SUPER: 'SUPER'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.SERIAL: 'SERIAL'>, 
<TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.ASOF: 'ASOF'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.INET: 'INET'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.VOID: 'VOID'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.NAME: 'NAME'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.INT: 'INT'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.DETACH: 'DETACH'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.INT256: 'INT256'>, <TokenType.DELETE: 'DELETE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.DATE: 'DATE'>, <TokenType.IPV4: 'IPV4'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.KEEP: 'KEEP'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.SOME: 'SOME'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DESC: 'DESC'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>}
TABLE_ALIAS_TOKENS = {<TokenType.SHOW: 'SHOW'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.SINK: 'SINK'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.CHAR: 'CHAR'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MAP: 'MAP'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.RENAME: 'RENAME'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.UINT128: 'UINT128'>, <TokenType.STAGE: 'STAGE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.DIV: 'DIV'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.USE: 'USE'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.PUT: 'PUT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TIME: 'TIME'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.IS: 'IS'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.JSON: 'JSON'>, <TokenType.VAR: 'VAR'>, <TokenType.CUBE: 'CUBE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.INT128: 'INT128'>, <TokenType.CACHE: 'CACHE'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.UINT: 'UINT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.BIT: 'BIT'>, <TokenType.COPY: 'COPY'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.LIMIT: 
'LIMIT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.BLOB: 'BLOB'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.UUID: 'UUID'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.CASE: 'CASE'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ANY: 'ANY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.FINAL: 'FINAL'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.END: 'END'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.NEXT: 'NEXT'>, <TokenType.ROW: 'ROW'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.MERGE: 'MERGE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.ASC: 'ASC'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.SEMI: 'SEMI'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.OBJECT: 'OBJECT'>, 
<TokenType.TSRANGE: 'TSRANGE'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.ANTI: 'ANTI'>, <TokenType.BINARY: 'BINARY'>, <TokenType.RING: 'RING'>, <TokenType.TAG: 'TAG'>, <TokenType.RANGE: 'RANGE'>, <TokenType.MODEL: 'MODEL'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.GET: 'GET'>, <TokenType.POINT: 'POINT'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.TOP: 'TOP'>, <TokenType.UINT256: 'UINT256'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ROWS: 'ROWS'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.FIRST: 'FIRST'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.TRUE: 'TRUE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ALL: 'ALL'>, <TokenType.KILL: 'KILL'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.NULL: 'NULL'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.SET: 'SET'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.LIST: 'LIST'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.SUPER: 'SUPER'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.INET: 'INET'>, <TokenType.INT8RANGE: 
'INT8RANGE'>, <TokenType.VOID: 'VOID'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.NAME: 'NAME'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.INT: 'INT'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.DETACH: 'DETACH'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.INT256: 'INT256'>, <TokenType.DELETE: 'DELETE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.DATE: 'DATE'>, <TokenType.IPV4: 'IPV4'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.SOME: 'SOME'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DESC: 'DESC'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>}
COLON_PLACEHOLDER_TOKENS = {<TokenType.SHOW: 'SHOW'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.SINK: 'SINK'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.CHAR: 'CHAR'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MAP: 'MAP'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.APPLY: 'APPLY'>, <TokenType.RENAME: 'RENAME'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.UINT128: 'UINT128'>, <TokenType.STAGE: 'STAGE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.DIV: 'DIV'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.USE: 'USE'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.PUT: 'PUT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TIME: 'TIME'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.IS: 'IS'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.JSON: 'JSON'>, <TokenType.VAR: 'VAR'>, <TokenType.CUBE: 'CUBE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.INT128: 'INT128'>, <TokenType.CACHE: 'CACHE'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.UINT: 'UINT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.BIT: 'BIT'>, <TokenType.COPY: 'COPY'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.UTINYINT: 
'UTINYINT'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.BLOB: 'BLOB'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.UUID: 'UUID'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.CASE: 'CASE'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ANY: 'ANY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.NUMBER: 'NUMBER'>, <TokenType.FINAL: 'FINAL'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.END: 'END'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.NEXT: 'NEXT'>, <TokenType.ROW: 'ROW'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.ASC: 'ASC'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 
<TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.ANTI: 'ANTI'>, <TokenType.BINARY: 'BINARY'>, <TokenType.RING: 'RING'>, <TokenType.TAG: 'TAG'>, <TokenType.RANGE: 'RANGE'>, <TokenType.MODEL: 'MODEL'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.GET: 'GET'>, <TokenType.POINT: 'POINT'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.TOP: 'TOP'>, <TokenType.UINT256: 'UINT256'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ROWS: 'ROWS'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.FIRST: 'FIRST'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.TRUE: 'TRUE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ALL: 'ALL'>, <TokenType.KILL: 'KILL'>, <TokenType.LEFT: 'LEFT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.NULL: 'NULL'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.FULL: 'FULL'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.SET: 'SET'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.LIST: 'LIST'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.SUPER: 'SUPER'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.TSTZMULTIRANGE: 
'TSTZMULTIRANGE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.ASOF: 'ASOF'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.INET: 'INET'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.VOID: 'VOID'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.NAME: 'NAME'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.INT: 'INT'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.DETACH: 'DETACH'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.INT256: 'INT256'>, <TokenType.DELETE: 'DELETE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.DATE: 'DATE'>, <TokenType.IPV4: 'IPV4'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.KEEP: 'KEEP'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.SOME: 'SOME'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DESC: 'DESC'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.And'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPLY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Apply'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONCAT_AGG': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ArrayConcatAgg'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_INTERSECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayIntersect'>>, 'ARRAY_INTERSECTION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayIntersect'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_REMOVE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayRemove'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Columns'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Contains'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CONVERT_TO_CHARSET': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConvertToCharset'>>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_SCHEMA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentSchema'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATE_BIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateBin'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'ENDS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.EndsWith'>>, 'ENDSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.EndsWith'>>, 'EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exists'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FEATURES_AT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FeaturesAtTime'>>, 
'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'INT64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Int64'>>, 'IS_ASCII': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsAscii'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExists'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_B_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBObjectAgg'>>, 'J_S_O_N_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONCast'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractArray'>>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'J_S_O_N_VALUE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONValueArray'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function Snowflake.Parser.<lambda>>, 'LEN': <function Snowflake.Parser.<lambda>>, 'CHAR_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'CHARACTER_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAKE_INTERVAL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MakeInterval'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MEDIAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Median'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Or'>>, 'OVERLAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Overlay'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_EXTRACT_ALL': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SPLIT_PART': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SplitPart'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'ST_DISTANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StDistance'>>, 'ST_POINT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StPoint'>>, 'ST_MAKEPOINT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StPoint'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 
'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.String'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRTOK_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <function build_timetostr_or_tochar>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_DOUBLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDouble'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDatetime'>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNICODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unicode'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_SECONDS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixSeconds'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UUID': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'XMLELEMENT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLElement'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function 
Parser.<lambda>>, 'STRPOS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'CHARINDEX': <function Parser.<lambda>>, 'INSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'LOCATE': <function Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY_CONSTRUCT': <function Snowflake.Parser.<lambda>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function _build_bitwise.<locals>._builder>, 'BIT_XOR': <function _build_bitwise.<locals>._builder>, 'BITOR': <function _build_bitwise.<locals>._builder>, 'BIT_OR': <function _build_bitwise.<locals>._builder>, 'BITSHIFTLEFT': <function _build_bitwise.<locals>._builder>, 'BIT_SHIFTLEFT': <function _build_bitwise.<locals>._builder>, 'BITSHIFTRIGHT': <function _build_bitwise.<locals>._builder>, 'BIT_SHIFTRIGHT': <function _build_bitwise.<locals>._builder>, 'BOOLXOR': <function _build_bitwise.<locals>._builder>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'EDITDISTANCE': <function Snowflake.Parser.<lambda>>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'HEX_DECODE_BINARY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_SUBSTR_ALL': <function _build_regexp_extract.<locals>._builder>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TABLE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function 
_build_date_time_add.<locals>._builder>, 'TIMESTAMPNTZFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_NTZ_FROM_PARTS': <function build_timestamp_from_parts>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIME': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'ARG_MAX': <function Parser.<dictcomp>.<lambda>>, 'ARGMAX': <function Parser.<dictcomp>.<lambda>>, 'MAX_BY': <function Parser.<dictcomp>.<lambda>>, 'ARG_MIN': <function Parser.<dictcomp>.<lambda>>, 'ARGMIN': <function Parser.<dictcomp>.<lambda>>, 'MIN_BY': <function Parser.<dictcomp>.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CEIL': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'FLOOR': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'OVERLAY': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'XMLELEMENT': <function Parser.<lambda>>, 'XMLTABLE': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>, 'LISTAGG': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>}
RANGE_PARSERS = {<TokenType.AT_GT: 'AT_GT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.LT_AT: 'LT_AT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'AS': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SWAP': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.ANALYZE: 'ANALYZE'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.GRANT: 'GRANT'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UNPIVOT: 'UNPIVOT'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.GET: 'GET'>: <function Snowflake.Parser.<lambda>>, <TokenType.PUT: 'PUT'>: <function Snowflake.Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'ENVIRONMENT': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function 
Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Snowflake.Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'CREDENTIALS': <function Snowflake.Parser.<lambda>>, 'FILE_FORMAT': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'DATABASES': <function _show_parser.<locals>._parse>, 'TERSE DATABASES': <function _show_parser.<locals>._parse>, 'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'STAGES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>, 'FILE FORMATS': <function _show_parser.<locals>._parse>, 'FUNCTIONS': <function _show_parser.<locals>._parse>, 'PROCEDURES': <function _show_parser.<locals>._parse>, 'WAREHOUSES': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WATERMARK': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'BUCKET': <function Parser.<lambda>>, 'TRUNCATE': <function Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.DOT: 'DOT'>, <TokenType.SLASH: 'SLASH'>, <TokenType.MOD: 'MOD'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'IMPORTED KEYS', 'UNIQUE KEYS', 'TABLES', 'SEQUENCES', 'VIEWS', 'OBJECTS'}
NON_TABLE_CREATABLES = {'STORAGE INTEGRATION', 'WAREHOUSE', 'STREAMLIT', 'TAG'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'DATABASES': {0: True}, 'TERSE': {'DATABASES': {0: True}, 'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'STAGES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}, 'FILE': {'FORMATS': {0: True}}, 'FUNCTIONS': {0: True}, 'PROCEDURES': {0: True}, 'WAREHOUSES': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
PIPE_SYNTAX_TRANSFORM_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
PROCEDURE_OPTIONS
EXECUTE_AS_OPTIONS
KEY_CONSTRAINT_OPTIONS
WINDOW_EXCLUDE_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
PRIVILEGE_FOLLOW_TOKENS
DESCRIBE_STYLES
ANALYZE_STYLES
ANALYZE_EXPRESSION_PARSERS
PARTITION_KEYWORDS
AMBIGUOUS_ALIAS_TOKENS
OPERATION_MODIFIERS
RECURSIVE_CTE_SEARCH_KIND
MODIFIABLES
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
WRAPPED_TRANSFORM_COLUMN_CONSTRAINT
OPTIONAL_ALIAS_TOKEN_CTE
ALTER_RENAME_REQUIRES_COLUMN
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
parse_set_operation
errors
sql
class Tokenizer(tokens.Tokenizer):
    """Tokenizer settings for the Snowflake dialect."""

    # Snowflake accepts both backslash and doubled single-quote escapes inside strings.
    STRING_ESCAPES = ["\\", "'"]
    HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    # $$ ... $$ dollar-quoting produces raw strings (e.g. UDF bodies).
    RAW_STRINGS = ["$$"]
    # Snowflake supports // line comments in addition to the standard forms;
    # block comments do not nest.
    COMMENTS = ["--", "//", ("/*", "*/")]
    NESTED_COMMENTS = False

    # Snowflake-specific keywords layered on top of the base tokenizer's table.
    KEYWORDS = {
        **tokens.Tokenizer.KEYWORDS,
        "FILE://": TokenType.URI_START,
        "BYTEINT": TokenType.INT,
        "EXCLUDE": TokenType.EXCEPT,
        "FILE FORMAT": TokenType.FILE_FORMAT,
        "GET": TokenType.GET,
        "ILIKE ANY": TokenType.ILIKE_ANY,
        "LIKE ANY": TokenType.LIKE_ANY,
        "MATCH_CONDITION": TokenType.MATCH_CONDITION,
        "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
        "MINUS": TokenType.EXCEPT,
        "NCHAR VARYING": TokenType.VARCHAR,
        "PUT": TokenType.PUT,
        "REMOVE": TokenType.COMMAND,
        "RM": TokenType.COMMAND,
        "SAMPLE": TokenType.TABLE_SAMPLE,
        "SQL_DOUBLE": TokenType.DOUBLE,
        "SQL_VARCHAR": TokenType.VARCHAR,
        "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
        "TAG": TokenType.TAG,
        "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
        "TOP": TokenType.TOP,
        "WAREHOUSE": TokenType.WAREHOUSE,
        "STAGE": TokenType.STAGE,
        "STREAMLIT": TokenType.STREAMLIT,
    }
    # Drop the hint-comment opener inherited from the base table; Snowflake
    # has no /*+ ... */ optimizer-hint syntax.
    KEYWORDS.pop("/*+")

    SINGLE_TOKENS = {
        **tokens.Tokenizer.SINGLE_TOKENS,
        "$": TokenType.PARAMETER,  # e.g. $1 positional / $var session parameters
    }

    # '$' may also appear inside identifiers/variables.
    VAR_SINGLE_TOKENS = {"$"}

    # SHOW statements are parsed structurally (not swallowed as opaque commands),
    # so remove SHOW from the command token set.
    COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '|>': <TokenType.PIPE_GT: 'PIPE_GT'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': 
<TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_SCHEMA': <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 
'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NAMESPACE': <TokenType.NAMESPACE: 'NAMESPACE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': 
<TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 
'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT256': <TokenType.INT256: 'INT256'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'UINT128': <TokenType.UINT128: 'UINT128'>, 'UINT256': <TokenType.UINT256: 'UINT256'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'DECIMAL256': <TokenType.DECIMAL256: 'DECIMAL256'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 
'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 
'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.ANALYZE: 'ANALYZE'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'FILE://': <TokenType.URI_START: 'URI_START'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'FILE FORMAT': <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, 'GET': <TokenType.GET: 'GET'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.PUT: 'PUT'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STAGE': <TokenType.STAGE: 'STAGE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.EXECUTE: 'EXECUTE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.RENAME: 'RENAME'>, <TokenType.FETCH: 'FETCH'>}
class Generator(generator.Generator):
    """SQL generator for the Snowflake dialect.

    The class-level flags below toggle base-generator behavior, and the
    mapping tables override how specific AST nodes are rendered.
    """

    # Bind/session parameters render with a leading '$'.
    PARAMETER_TOKEN = "$"
    MATCHED_BY_SOURCE = False
    SINGLE_STRING_INTERVAL = True
    JOIN_HINTS = False
    TABLE_HINTS = False
    QUERY_HINTS = False
    AGGREGATE_FILTER_SUPPORTED = False
    SUPPORTS_TABLE_COPY = False
    COLLATE_IS_FUNC = True
    LIMIT_ONLY_LITERALS = True
    JSON_KEY_VALUE_PAIR_SEP = ","
    INSERT_OVERWRITE = " OVERWRITE INTO"
    STRUCT_DELIMITER = ("(", ")")
    COPY_PARAMS_ARE_WRAPPED = False
    COPY_PARAMS_EQ_REQUIRED = True
    # SELECT * EXCLUDE (...) is Snowflake's spelling of star-except.
    STAR_EXCEPT = "EXCLUDE"
    SUPPORTS_EXPLODING_PROJECTIONS = False
    ARRAY_CONCAT_IS_VAR_LEN = False
    SUPPORTS_CONVERT_TIMEZONE = True
    EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
    SUPPORTS_MEDIAN = True
    ARRAY_SIZE_NAME = "ARRAY_SIZE"

    # Per-node rendering overrides layered over the base generator's table.
    TRANSFORMS = {
        **generator.Generator.TRANSFORMS,
        exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
        exp.ArgMax: rename_func("MAX_BY"),
        exp.ArgMin: rename_func("MIN_BY"),
        exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
        # NOTE: operands are emitted in swapped order relative to the AST.
        exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
        exp.ArrayIntersect: rename_func("ARRAY_INTERSECTION"),
        exp.AtTimeZone: lambda self, e: self.func(
            "CONVERT_TIMEZONE", e.args.get("zone"), e.this
        ),
        exp.BitwiseOr: rename_func("BITOR"),
        exp.BitwiseXor: rename_func("BITXOR"),
        exp.BitwiseLeftShift: rename_func("BITSHIFTLEFT"),
        exp.BitwiseRightShift: rename_func("BITSHIFTRIGHT"),
        exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
        exp.DateAdd: date_delta_sql("DATEADD"),
        exp.DateDiff: date_delta_sql("DATEDIFF"),
        exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
        exp.DatetimeDiff: timestampdiff_sql,
        exp.DateStrToDate: datestrtodate_sql,
        exp.DayOfMonth: rename_func("DAYOFMONTH"),
        exp.DayOfWeek: rename_func("DAYOFWEEK"),
        exp.DayOfWeekIso: rename_func("DAYOFWEEKISO"),
        exp.DayOfYear: rename_func("DAYOFYEAR"),
        exp.Explode: rename_func("FLATTEN"),
        exp.Extract: lambda self, e: self.func(
            "DATE_PART", map_date_part(e.this, self.dialect), e.expression
        ),
        exp.FileFormatProperty: lambda self,
        e: f"FILE_FORMAT=({self.expressions(e, 'expressions', sep=' ')})",
        exp.FromTimeZone: lambda self, e: self.func(
            "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
        ),
        # ARRAY_GENERATE_RANGE's end bound is exclusive, hence the +1.
        exp.GenerateSeries: lambda self, e: self.func(
            "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
        ),
        exp.GroupConcat: lambda self, e: groupconcat_sql(self, e, sep=""),
        exp.If: if_sql(name="IFF", false_value="NULL"),
        exp.JSONExtractArray: _json_extract_value_array_sql,
        exp.JSONExtractScalar: lambda self, e: self.func(
            "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
        ),
        exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
        exp.JSONPathRoot: lambda *_: "",
        exp.JSONValueArray: _json_extract_value_array_sql,
        exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
            rename_func("EDITDISTANCE")
        ),
        exp.LocationProperty: lambda self, e: f"LOCATION={self.sql(e, 'this')}",
        exp.LogicalAnd: rename_func("BOOLAND_AGG"),
        exp.LogicalOr: rename_func("BOOLOR_AGG"),
        exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
        exp.MakeInterval: no_make_interval_sql,
        exp.Max: max_or_greatest,
        exp.Min: min_or_least,
        exp.ParseJSON: lambda self, e: self.func(
            "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
        ),
        exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
        exp.PercentileCont: transforms.preprocess(
            [transforms.add_within_group_for_percentiles]
        ),
        exp.PercentileDisc: transforms.preprocess(
            [transforms.add_within_group_for_percentiles]
        ),
        exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]),
        exp.RegexpExtract: _regexpextract_sql,
        exp.RegexpExtractAll: _regexpextract_sql,
        exp.RegexpILike: _regexpilike_sql,
        exp.Rand: rename_func("RANDOM"),
        # SELECT rewrites applied before generation, in order.
        exp.Select: transforms.preprocess(
            [
                transforms.eliminate_window_clause,
                transforms.eliminate_distinct_on,
                transforms.explode_projection_to_unnest(),
                transforms.eliminate_semi_and_anti_joins,
                _transform_generate_date_array,
            ]
        ),
        exp.SHA: rename_func("SHA1"),
        exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
        exp.StartsWith: rename_func("STARTSWITH"),
        exp.EndsWith: rename_func("ENDSWITH"),
        exp.StrPosition: lambda self, e: strposition_sql(
            self, e, func_name="CHARINDEX", supports_position=True
        ),
        exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
        exp.Stuff: rename_func("INSERT"),
        exp.StPoint: rename_func("ST_MAKEPOINT"),
        exp.TimeAdd: date_delta_sql("TIMEADD"),
        exp.Timestamp: no_timestamp_sql,
        exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
        # NOTE: `expression` and `this` are emitted in swapped order relative
        # to the AST node's argument order.
        exp.TimestampDiff: lambda self, e: self.func(
            "TIMESTAMPDIFF", e.unit, e.expression, e.this
        ),
        exp.TimestampTrunc: timestamptrunc_sql(),
        exp.TimeStrToTime: timestrtotime_sql,
        exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
        exp.ToArray: rename_func("TO_ARRAY"),
        exp.ToChar: lambda self, e: self.function_fallback_sql(e),
        exp.ToDouble: rename_func("TO_DOUBLE"),
        exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
        exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
        exp.TsOrDsToDate: lambda self, e: self.func(
            "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
        ),
        exp.TsOrDsToTime: lambda self, e: self.func(
            "TRY_TO_TIME" if e.args.get("safe") else "TO_TIME", e.this, self.format_time(e)
        ),
        exp.Unhex: rename_func("HEX_DECODE_BINARY"),
        exp.UnixToTime: rename_func("TO_TIMESTAMP"),
        exp.Uuid: rename_func("UUID_STRING"),
        exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
        exp.WeekOfYear: rename_func("WEEKOFYEAR"),
        exp.Xor: rename_func("BOOLXOR"),
    }

    # JSON path components that can be rendered natively.
    SUPPORTED_JSON_PATH_PARTS = {
        exp.JSONPathKey,
        exp.JSONPathRoot,
        exp.JSONPathSubscript,
    }

    TYPE_MAPPING = {
        **generator.Generator.TYPE_MAPPING,
        exp.DataType.Type.NESTED: "OBJECT",
        exp.DataType.Type.STRUCT: "OBJECT",
        exp.DataType.Type.BIGDECIMAL: "DOUBLE",
    }

    TOKEN_MAPPING = {
        TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
    }

    # Where each property may appear in a CREATE statement.
    PROPERTIES_LOCATION = {
        **generator.Generator.PROPERTIES_LOCATION,
        exp.CredentialsProperty: exp.Properties.Location.POST_WITH,
        exp.LocationProperty: exp.Properties.Location.POST_WITH,
        exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
        exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
        exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    }

    # If any of these appears inside a VALUES clause, values_sql forces
    # values_as_table=False.
    UNSUPPORTED_VALUES_EXPRESSIONS = {
        exp.Map,
        exp.StarMap,
        exp.Struct,
        exp.VarMap,
    }

    RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS = (exp.ArrayAgg,)
def with_properties(self, properties: exp.Properties) -> str:
    """Render properties unwrapped and space-separated, preceded by a separator."""
    prefix = self.sep("")
    return self.properties(properties, wrapped=False, prefix=prefix, sep=" ")
1177
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Render VALUES, dropping the table form when it contains any expression
    that Snowflake can't represent inside a VALUES clause."""
    contains_unsupported = expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS) is not None
    return super().values_sql(
        expression,
        values_as_table=values_as_table and not contains_unsupported,
    )
1183
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type, degrading typed structs to a bare OBJECT.

    The typed form requires key/type pairs — OBJECT(<key> <value_type> ...) —
    so a struct whose fields are bare DataType nodes (no keys) can only be
    emitted as an untyped OBJECT.
    """
    fields = expression.expressions
    if fields and expression.is_type(*exp.DataType.STRUCT_TYPES):
        if any(isinstance(field, exp.DataType) for field in fields):
            return "OBJECT"

    return super().datatype_sql(expression)
1195
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER(value[, format[, precision[, scale]]])."""
    args = expression.args
    return self.func(
        "TO_NUMBER",
        expression.this,
        args.get("format"),
        args.get("precision"),
        args.get("scale"),
    )
1204
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS, folding milliseconds into nanoseconds.

    Snowflake's function takes a nanosecond component, so a `milli` argument
    is popped off the node and rewritten as `milli * 1_000_000` nanoseconds.
    """
    milliseconds = expression.args.get("milli")
    if milliseconds is not None:
        nanoseconds = milliseconds.pop() * exp.Literal.number(1000000)
        expression.set("nano", nanoseconds)

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1212
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render a cast; geospatial targets use conversion functions instead."""
    geo_converters = {
        exp.DataType.Type.GEOGRAPHY: "TO_GEOGRAPHY",
        exp.DataType.Type.GEOMETRY: "TO_GEOMETRY",
    }
    for data_type, func_name in geo_converters.items():
        if expression.is_type(data_type):
            return self.func(func_name, expression.this)

    return super().cast_sql(expression, safe_prefix=safe_prefix)
1220
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST, falling back to CAST for non-string operands.

    Snowflake's TRY_CAST only accepts string inputs, so the operand's type is
    inferred (when not already annotated) and anything that isn't text or
    unknown is rendered as a plain CAST instead.
    """
    operand = expression.this

    if operand.type is None:
        # Annotate on demand so we can decide whether TRY_CAST is legal.
        from sqlglot.optimizer.annotate_types import annotate_types

        operand = annotate_types(operand, dialect=self.dialect)

    if not operand.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        # TRY_CAST only works for string values in Snowflake
        return self.cast_sql(expression)

    return super().trycast_sql(expression)
1234
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG; the single-argument form becomes LN (natural log)."""
    return (
        super().log_sql(expression)
        if expression.expression
        else self.func("LN", expression.this)
    )
1240
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Lower UNNEST to Snowflake's TABLE(FLATTEN(INPUT => ...)) construct.

    FLATTEN exposes a fixed set of output columns (seq, key, path, index,
    value, this); a user-supplied alias column is mapped onto the "value"
    slot, and an explicit offset expression (if any) takes the "index" slot.
    """
    unnest_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    unnest_alias_columns = unnest_alias.columns if unnest_alias else []
    # The first aliased column (if present) names FLATTEN's value output.
    value = seq_get(unnest_alias_columns, 0) or exp.to_identifier("value")

    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        # Pop the offset off the node so it isn't rendered a second time.
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
        value,
        exp.to_identifier("this"),
    ]

    if unnest_alias:
        # Replace the alias columns in place with the full FLATTEN column list.
        unnest_alias.set("columns", columns)
    else:
        unnest_alias = exp.TableAlias(this="_u", columns=columns)

    table_input = self.sql(expression.expressions[0])
    if not table_input.startswith("INPUT =>"):
        table_input = f"INPUT => {table_input}"

    explode = f"TABLE(FLATTEN({table_input}))"
    alias = self.sql(unnest_alias)
    alias = f" AS {alias}" if alias else ""
    # As a FROM/JOIN source, emit just TABLE(...); elsewhere (e.g. inside a
    # subquery projection) emit "<value> FROM TABLE(...)".
    value = "" if isinstance(expression.parent, (exp.From, exp.Join)) else f"{value} FROM "

    return f"{value}{explode}{alias}"
1272
def show_sql(self, expression: exp.Show) -> str:
    """Render a SHOW statement with its optional Snowflake modifiers.

    Segment order matters: TERSE, object name, HISTORY, LIKE, IN <scope kind>,
    scope, STARTS WITH, limit, FROM, WITH PRIVILEGES.
    """
    like = self.sql(expression, "like")
    scope = self.sql(expression, "scope")
    scope_kind = self.sql(expression, "scope_kind")
    starts_with = self.sql(expression, "starts_with")
    from_ = self.sql(expression, "from")
    privileges = self.expressions(expression, key="privileges", flat=True)

    segments = [
        "SHOW ",
        "TERSE " if expression.args.get("terse") else "",
        expression.name,
        " HISTORY" if expression.args.get("history") else "",
        f" LIKE {like}" if like else "",
        f" IN {scope_kind}" if scope_kind else "",
        f" {scope}" if scope else "",
        f" STARTS WITH {starts_with}" if starts_with else "",
        self.sql(expression, "limit"),
        f" FROM {from_}" if from_ else "",
        f" WITH PRIVILEGES {privileges}" if privileges else "",
    ]
    return "".join(segments)
1300
def describe_sql(self, expression: exp.Describe) -> str:
    """Render a DESCRIBE statement, defaulting the object kind to TABLE.

    Fix: the original guarded `kind` with `if kind_value else ""`, but
    `kind_value` can never be falsy because of the `or "TABLE"` fallback,
    so the dead conditional is removed.
    """
    # Default to table if kind is unknown
    kind = f" {expression.args.get('kind') or 'TABLE'}"
    this = f" {self.sql(expression, 'this')}"
    expressions = self.expressions(expression, flat=True)
    expressions = f" {expressions}" if expressions else ""
    return f"DESCRIBE{kind}{this}{expressions}"
1309
1310        def generatedasidentitycolumnconstraint_sql(
1311            self, expression: exp.GeneratedAsIdentityColumnConstraint
1312        ) -> str:
1313            start = expression.args.get("start")
1314            start = f" START {start}" if start else ""
1315            increment = expression.args.get("increment")
1316            increment = f" INCREMENT {increment}" if increment else ""
1317            return f"AUTOINCREMENT{start}{increment}"
1318
1319        def cluster_sql(self, expression: exp.Cluster) -> str:
1320            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1321
1322        def struct_sql(self, expression: exp.Struct) -> str:
1323            keys = []
1324            values = []
1325
1326            for i, e in enumerate(expression.expressions):
1327                if isinstance(e, exp.PropertyEQ):
1328                    keys.append(
1329                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1330                    )
1331                    values.append(e.expression)
1332                else:
1333                    keys.append(exp.Literal.string(f"_{i}"))
1334                    values.append(e)
1335
1336            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1337
1338        @unsupported_args("weight", "accuracy")
1339        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1340            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1341
1342        def alterset_sql(self, expression: exp.AlterSet) -> str:
1343            exprs = self.expressions(expression, flat=True)
1344            exprs = f" {exprs}" if exprs else ""
1345            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1346            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1347            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1348            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1349            tag = self.expressions(expression, key="tag", flat=True)
1350            tag = f" TAG {tag}" if tag else ""
1351
1352            return f"SET{exprs}{file_format}{copy_options}{tag}"
1353
        def strtotime_sql(self, expression: exp.StrToTime) -> str:
            """Render StrToTime as [TRY_]TO_TIMESTAMP(<value>, <format>)."""
            # The "safe" arg maps to Snowflake's error-tolerant TRY_ variant.
            safe_prefix = "TRY_" if expression.args.get("safe") else ""
            return self.func(
                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
            )
1359
        def timestampsub_sql(self, expression: exp.TimestampSub) -> str:
            """Render TimestampSub by negating the delta and emitting TimestampAdd."""
            # There is no direct subtraction counterpart here; ADD with a
            # negated amount is equivalent.
            return self.sql(
                exp.TimestampAdd(
                    this=expression.this,
                    expression=expression.expression * -1,
                    unit=expression.unit,
                )
            )
1368
        def jsonextract_sql(self, expression: exp.JSONExtract) -> str:
            """Render JSONExtract as GET_PATH, parsing string input into JSON first."""
            this = expression.this

            # JSON strings are valid coming from other dialects such as BQ
            return self.func(
                "GET_PATH",
                exp.ParseJSON(this=this) if this.is_string else this,
                expression.expression,
            )
1378
1379        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1380            this = expression.this
1381            if not isinstance(this, exp.TsOrDsToTimestamp):
1382                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1383
1384            return self.func("TO_CHAR", this, self.format_time(expression))
1385
        def datesub_sql(self, expression: exp.DateSub) -> str:
            """Render DateSub as DATEADD with a negated count.

            NOTE: mutates the AST in place — the subtracted amount node is
            replaced by its negation before delegating to the shared DATEADD
            generator.
            """
            value = expression.expression
            if value:
                value.replace(value * (-1))
            else:
                self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")

            return date_delta_sql("DATEADD")(self, expression)
1394
1395        def select_sql(self, expression: exp.Select) -> str:
1396            limit = expression.args.get("limit")
1397            offset = expression.args.get("offset")
1398            if offset and not limit:
1399                expression.limit(exp.Null(), copy=False)
1400            return super().select_sql(expression)
1401
        def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
            """Render the creatable part of a CREATE, special-casing materialized views.

            For CREATE MATERIALIZED VIEW ... COPY GRANTS, the COPY GRANTS clause
            must precede the column list, so it is removed from the POST_SCHEMA
            property bucket and emitted directly after the view name instead.
            """
            is_materialized = expression.find(exp.MaterializedProperty)
            copy_grants_property = expression.find(exp.CopyGrantsProperty)

            if expression.kind == "VIEW" and is_materialized and copy_grants_property:
                # For materialized views, COPY GRANTS is located *before* the columns list
                # This is in contrast to normal views where COPY GRANTS is located *after* the columns list
                # We default CopyGrantsProperty to POST_SCHEMA which means we need to output it POST_NAME if a materialized view is detected
                # ref: https://docs.snowflake.com/en/sql-reference/sql/create-materialized-view#syntax
                # ref: https://docs.snowflake.com/en/sql-reference/sql/create-view#syntax
                post_schema_properties = locations[exp.Properties.Location.POST_SCHEMA]
                # Remove COPY GRANTS from the bucket so the default renderer
                # doesn't emit it a second time after the schema.
                post_schema_properties.pop(post_schema_properties.index(copy_grants_property))

                this_name = self.sql(expression.this, "this")
                copy_grants = self.sql(copy_grants_property)
                this_schema = self.schema_columns_sql(expression.this)
                this_schema = f"{self.sep()}{this_schema}" if this_schema else ""

                return f"{this_name}{self.sep()}{copy_grants}{this_schema}"

            return super().createable_sql(expression, locations)
1423
1424        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
1425            this = expression.this
1426
1427            # If an ORDER BY clause is present, we need to remove it from ARRAY_AGG
1428            # and add it later as part of the WITHIN GROUP clause
1429            order = this if isinstance(this, exp.Order) else None
1430            if order:
1431                expression.set("this", order.this.pop())
1432
1433            expr_sql = super().arrayagg_sql(expression)
1434
1435            if order:
1436                expr_sql = self.sql(exp.WithinGroup(this=expr_sql, expression=order))
1437
1438            return expr_sql
1439
        def array_sql(self, expression: exp.Array) -> str:
            """Render an ARRAY literal.

            A leading SELECT AS STRUCT argument is rewritten into a subquery of
            ARRAY_AGG(OBJECT_CONSTRUCT(...)); everything else falls through to
            the generic inline array renderer.
            """
            expressions = expression.expressions

            first_expr = seq_get(expressions, 0)
            if isinstance(first_expr, exp.Select):
                # SELECT AS STRUCT foo AS alias_foo -> ARRAY_AGG(OBJECT_CONSTRUCT('alias_foo', foo))
                if first_expr.text("kind").upper() == "STRUCT":
                    object_construct_args = []
                    for expr in first_expr.expressions:
                        # Alias case: SELECT AS STRUCT foo AS alias_foo -> OBJECT_CONSTRUCT('alias_foo', foo)
                        # Column case: SELECT AS STRUCT foo -> OBJECT_CONSTRUCT('foo', foo)
                        name = expr.this if isinstance(expr, exp.Alias) else expr

                        object_construct_args.extend([exp.Literal.string(expr.alias_or_name), name])

                    array_agg = exp.ArrayAgg(
                        this=_build_object_construct(args=object_construct_args)
                    )

                    # Mutate the SELECT in place: drop the STRUCT kind and make
                    # the aggregate its sole projection, then wrap in a subquery.
                    first_expr.set("kind", None)
                    first_expr.set("expressions", [array_agg])

                    return self.sql(first_expr.subquery())

            return inline_array_sql(self, expression)

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): never quote, except where the dialect makes quoting mandatory; True or 'always': always quote; 'safe': only quote identifiers that are case-insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
SUPPORTS_EXPLODING_PROJECTIONS = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_CONVERT_TIMEZONE = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_MEDIAN = True
ARRAY_SIZE_NAME = 'ARRAY_SIZE'
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AnalyzeColumns'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AnalyzeWith'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Ceil'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConvertToCharset'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CredentialsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EnviromentProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Floor'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Get'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Int64'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByBucket'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PartitionByTruncate'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Put'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SwapTable'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Tags'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UsingTemplateProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UsingData'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Uuid'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithProcedureOptions'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ForceProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, 
<class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayIntersect'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.BitwiseLeftShift'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.BitwiseRightShift'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeDiff'>: <function timestampdiff_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeekIso'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.FileFormatProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function 
Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtractArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONValueArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.Levenshtein'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.MakeInterval'>: <function no_make_interval_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpExtract'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpExtractAll'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.EndsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StPoint'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.Timestamp'>: <function no_timestamp_sql>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToDouble'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: 
<function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.DATETIME2: 'DATETIME2'>: 'TIMESTAMP', <Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.BLOB: 'BLOB'>: 'VARBINARY', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.SMALLDATETIME: 'SMALLDATETIME'>: 'TIMESTAMP', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'DOUBLE'}
TOKEN_MAPPING = {<TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>: 'AUTOINCREMENT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EncodeProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EnviromentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.IncludeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 
'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: 
<Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StorageHandlerProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Tags'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.UsingTemplateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, 
<class 'sqlglot.expressions.WithProcedureOptions'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ForceProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.CredentialsProperty'>: <Location.POST_WITH: 'POST_WITH'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.StarMap'>, <class 'sqlglot.expressions.VarMap'>, <class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.Map'>}
RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS = (<class 'sqlglot.expressions.ArrayAgg'>,)
def with_properties(self, properties: exp.Properties) -> str:
    """Render CREATE properties unwrapped, separated by single spaces."""
    return self.properties(properties, prefix=self.sep(""), sep=" ", wrapped=False)
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Render a VALUES clause, falling back to non-table form when the rows
    contain expressions Snowflake cannot express inside VALUES."""
    has_unsupported = bool(expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS))
    return super().values_sql(
        expression, values_as_table=values_as_table and not has_unsupported
    )
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type, collapsing struct types with bare (key-less) field
    types down to plain OBJECT."""
    fields = expression.expressions
    if fields and expression.is_type(*exp.DataType.STRUCT_TYPES):
        # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ],
        # so a field without a key cannot be rendered — emit the bare type.
        if any(isinstance(field, exp.DataType) for field in fields):
            return "OBJECT"

    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER with its optional format, precision and scale args."""
    optional_args = (expression.args.get(key) for key in ("format", "precision", "scale"))
    return self.func("TO_NUMBER", expression.this, *optional_args)
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS, folding a milliseconds argument into the
    nanoseconds slot (Snowflake has no milliseconds parameter)."""
    milli = expression.args.get("milli")
    if milli is not None:
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render a cast, using Snowflake's conversion functions for spatial types."""
    for spatial_type, func_name in (
        (exp.DataType.Type.GEOGRAPHY, "TO_GEOGRAPHY"),
        (exp.DataType.Type.GEOMETRY, "TO_GEOMETRY"),
    ):
        if expression.is_type(spatial_type):
            return self.func(func_name, expression.this)

    return super().cast_sql(expression, safe_prefix=safe_prefix)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST, downgrading to a plain cast for non-string operands."""
    value = expression.this

    if value.type is None:
        # Type information may be missing when the tree wasn't annotated;
        # annotate on demand so we can decide between TRY_CAST and CAST.
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value, dialect=self.dialect)

    # TRY_CAST only works for string values in Snowflake
    if not value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        return self.cast_sql(expression)

    return super().trycast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG; a single-argument LOG is the natural logarithm (LN)."""
    if expression.expression:
        return super().log_sql(expression)

    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)) construct.

    FLATTEN exposes the columns SEQ, KEY, PATH, INDEX, VALUE and THIS, so the
    user-supplied aliases are mapped onto that fixed column list.
    """
    alias_node = expression.args.get("alias")
    offset = expression.args.get("offset")

    existing_columns = alias_node.columns if alias_node else []
    value = seq_get(existing_columns, 0) or exp.to_identifier("value")

    flatten_columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        # an explicit OFFSET alias replaces the default "index" column name
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
        value,
        exp.to_identifier("this"),
    ]

    if alias_node:
        alias_node.set("columns", flatten_columns)
    else:
        alias_node = exp.TableAlias(this="_u", columns=flatten_columns)

    table_input = self.sql(expression.expressions[0])
    if not table_input.startswith("INPUT =>"):
        table_input = f"INPUT => {table_input}"

    flatten_call = f"TABLE(FLATTEN({table_input}))"
    alias_sql = self.sql(alias_node)
    alias_sql = f" AS {alias_sql}" if alias_sql else ""
    # Inside FROM/JOIN the construct stands alone; elsewhere it becomes
    # "<value> FROM TABLE(FLATTEN(...))".
    prefix = "" if isinstance(expression.parent, (exp.From, exp.Join)) else f"{value} FROM "

    return f"{prefix}{flatten_call}{alias_sql}"
def show_sql(self, expression: exp.Show) -> str:
    """Render a SHOW command with its optional modifiers."""

    def clause(key: str, prefix: str) -> str:
        # Render an argument with a leading keyword, or nothing if absent.
        rendered = self.sql(expression, key)
        return f"{prefix}{rendered}" if rendered else ""

    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""
    like = clause("like", " LIKE ")
    scope = clause("scope", " ")
    scope_kind = clause("scope_kind", " IN ")
    starts_with = clause("starts_with", " STARTS WITH ")
    limit = self.sql(expression, "limit")
    from_ = clause("from", " FROM ")

    privileges = self.expressions(expression, key="privileges", flat=True)
    privileges = f" WITH PRIVILEGES {privileges}" if privileges else ""

    return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}{privileges}"
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE, defaulting the object kind to TABLE when unknown."""
    kind = expression.args.get("kind") or "TABLE"
    target = self.sql(expression, "this")
    extra = self.expressions(expression, flat=True)
    extra = f" {extra}" if extra else ""
    return f"DESCRIBE {kind} {target}{extra}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render an identity column as AUTOINCREMENT [START n] [INCREMENT n]."""
    parts = ["AUTOINCREMENT"]

    start = expression.args.get("start")
    if start:
        parts.append(f" START {start}")

    increment = expression.args.get("increment")
    if increment:
        parts.append(f" INCREMENT {increment}")

    return "".join(parts)
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Render a clustering key list as CLUSTER BY (...)."""
    clustering_keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({clustering_keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Render a struct literal as OBJECT_CONSTRUCT(key1, value1, key2, value2, ...).

    Named fields keep their names as string keys; positional fields get
    synthetic keys '_0', '_1', ... based on their index.
    """
    construct_args: t.List[exp.Expression] = []

    for index, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            if isinstance(field.this, exp.Identifier):
                key = exp.Literal.string(field.name)
            else:
                key = field.this
            construct_args.extend((key, field.expression))
        else:
            construct_args.extend((exp.Literal.string(f"_{index}"), field))

    return self.func("OBJECT_CONSTRUCT", *construct_args)
@unsupported_args("weight", "accuracy")
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    """Render an approximate quantile as Snowflake's APPROX_PERCENTILE."""
    quantile = expression.args.get("quantile")
    return self.func("APPROX_PERCENTILE", expression.this, quantile)
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render ALTER ... SET with optional stage file format, copy options and tags."""
    exprs = self.expressions(expression, flat=True)
    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    tag = self.expressions(expression, key="tag", flat=True)

    segments = [
        f" {exprs}" if exprs else "",
        f" STAGE_FILE_FORMAT = ({file_format})" if file_format else "",
        f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else "",
        f" TAG {tag}" if tag else "",
    ]
    return "SET" + "".join(segments)
def strtotime_sql(self, expression: exp.StrToTime):
    """Render string-to-time conversion, using TRY_TO_TIMESTAMP in safe mode."""
    func_name = "TRY_TO_TIMESTAMP" if expression.args.get("safe") else "TO_TIMESTAMP"
    return self.func(func_name, expression.this, self.format_time(expression))
def timestampsub_sql(self, expression: exp.TimestampSub):
    """Render timestamp subtraction as a TIMESTAMPADD with a negated amount."""
    negated_add = exp.TimestampAdd(
        this=expression.this,
        expression=expression.expression * -1,
        unit=expression.unit,
    )
    return self.sql(negated_add)
def jsonextract_sql(self, expression: exp.JSONExtract):
    """Render JSON extraction as GET_PATH, parsing string inputs first."""
    target = expression.this

    # JSON strings are valid coming from other dialects such as BQ
    if target.is_string:
        target = exp.ParseJSON(this=target)

    return self.func("GET_PATH", target, expression.expression)
def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    """Render time formatting as TO_CHAR, casting the value to TIMESTAMP
    unless it already is a timestamp conversion."""
    value = expression.this
    if not isinstance(value, exp.TsOrDsToTimestamp):
        value = exp.cast(value, exp.DataType.Type.TIMESTAMP)

    return self.func("TO_CHAR", value, self.format_time(expression))
def datesub_sql(self, expression: exp.DateSub) -> str:
    """Render date subtraction as DATEADD with a negated count."""
    count = expression.expression
    if count:
        count.replace(count * (-1))
    else:
        self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")

    return date_delta_sql("DATEADD")(self, expression)
def select_sql(self, expression: exp.Select) -> str:
    """Render SELECT, injecting LIMIT NULL when OFFSET appears without LIMIT
    (Snowflake requires a LIMIT clause alongside OFFSET)."""
    if expression.args.get("offset") and not expression.args.get("limit"):
        expression.limit(exp.Null(), copy=False)
    return super().select_sql(expression)
def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
    """Render the creatable part of a CREATE statement.

    For materialized views, COPY GRANTS is located *before* the columns list,
    in contrast to normal views where it appears *after* the columns list.
    CopyGrantsProperty defaults to POST_SCHEMA, so it is relocated here when
    a materialized view is detected.
    ref: https://docs.snowflake.com/en/sql-reference/sql/create-materialized-view#syntax
    ref: https://docs.snowflake.com/en/sql-reference/sql/create-view#syntax
    """
    materialized = expression.find(exp.MaterializedProperty)
    copy_grants = expression.find(exp.CopyGrantsProperty)

    if expression.kind == "VIEW" and materialized and copy_grants:
        post_schema_properties = locations[exp.Properties.Location.POST_SCHEMA]
        post_schema_properties.pop(post_schema_properties.index(copy_grants))

        name_sql = self.sql(expression.this, "this")
        copy_grants_sql = self.sql(copy_grants)
        schema_sql = self.schema_columns_sql(expression.this)
        schema_sql = f"{self.sep()}{schema_sql}" if schema_sql else ""

        return f"{name_sql}{self.sep()}{copy_grants_sql}{schema_sql}"

    return super().createable_sql(expression, locations)
def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
    """Render ARRAY_AGG, moving any ORDER BY into a WITHIN GROUP clause."""
    operand = expression.this

    # An ORDER BY wrapped around the aggregated value must be emitted as
    # ARRAY_AGG(value) WITHIN GROUP (ORDER BY ...), so detach it first.
    order = operand if isinstance(operand, exp.Order) else None
    if order is not None:
        expression.set("this", order.this.pop())

    rendered = super().arrayagg_sql(expression)

    if order is not None:
        rendered = self.sql(exp.WithinGroup(this=rendered, expression=order))

    return rendered
def array_sql(self, expression: exp.Array) -> str:
    """Render an array constructor.

    ARRAY(SELECT AS STRUCT ...) (e.g. from BigQuery) becomes a subquery that
    aggregates OBJECT_CONSTRUCT rows with ARRAY_AGG; anything else is rendered
    as an inline array literal.
    """
    exprs = expression.expressions
    head = seq_get(exprs, 0)

    # SELECT AS STRUCT foo AS alias_foo -> ARRAY_AGG(OBJECT_CONSTRUCT('alias_foo', foo))
    if isinstance(head, exp.Select) and head.text("kind").upper() == "STRUCT":
        construct_args = []
        for projection in head.expressions:
            # Alias case: SELECT AS STRUCT foo AS alias_foo -> OBJECT_CONSTRUCT('alias_foo', foo)
            # Column case: SELECT AS STRUCT foo -> OBJECT_CONSTRUCT('foo', foo)
            source = projection.this if isinstance(projection, exp.Alias) else projection
            construct_args.extend((exp.Literal.string(projection.alias_or_name), source))

        aggregated = exp.ArrayAgg(this=_build_object_construct(args=construct_args))

        head.set("kind", None)
        head.set("expressions", [aggregated])

        return self.sql(head.subquery())

    return inline_array_sql(self, expression)
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'windows': <function Generator.<lambda>>, 'qualify': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SUPPORTS_WINDOW_EXCLUDE
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
SUPPORTS_UNIX_SECONDS
ALTER_SET_WRAPPED
PARSE_JSON_NAME
ALTER_SET_TYPE
ARRAY_SIZE_DIM_REQUIRED
TIME_PART_SINGULARS
NAMED_PLACEHOLDER_TOKEN
EXPRESSION_PRECEDES_PROPERTIES_CREATABLES
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
sanitize_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
limitoptions_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablefromrows_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
for_modifiers
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterindex_sql
alterdiststyle_sql
altersortkey_sql
alterrename_sql
renamecolumn_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
addpartition_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
safedivide_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
jsoncast_sql
try_sql
use_sql
binary
ceil_floor
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
whens_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
uniquekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodatetime_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonextractquote_sql
jsonexists_sql
apply_sql
grant_sql
grantprivilege_sql
grantprincipal_sql
columns_sql
overlay_sql
todouble_sql
string_sql
median_sql
overflowtruncatebehavior_sql
unixseconds_sql
arraysize_sql
attach_sql
detach_sql
attachoption_sql
featuresattime_sql
watermarkcolumnconstraint_sql
encodeproperty_sql
includeproperty_sql
xmlelement_sql
xmlkeyvalueoption_sql
partitionbyrangeproperty_sql
partitionbyrangepropertydynamic_sql
unpivotcolumns_sql
analyzesample_sql
analyzestatistics_sql
analyzehistogram_sql
analyzedelete_sql
analyzelistchainedrows_sql
analyzevalidate_sql
analyze_sql
xmltable_sql
xmlnamespace_sql
export_sql
declare_sql
declareitem_sql
recursivewithsearch_sql
parameterizedagg_sql
anonymousaggfunc_sql
combinedaggfunc_sql
combinedparameterizedagg_sql
get_put_sql
translatecharacters_sql