diff --git a/.changes/unreleased/Features-20230924-211828.yaml b/.changes/unreleased/Features-20230924-211828.yaml new file mode 100644 index 00000000000..f9de9c0aa99 --- /dev/null +++ b/.changes/unreleased/Features-20230924-211828.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Scala model initial version +time: 2023-09-24T21:18:28.919451+03:00 +custom: + Author: pekapa + Issue: "8701" diff --git a/core/dbt/adapters/base/__init__.py b/core/dbt/adapters/base/__init__.py index 07f5303992e..6a0629fc332 100644 --- a/core/dbt/adapters/base/__init__.py +++ b/core/dbt/adapters/base/__init__.py @@ -14,6 +14,7 @@ AdapterConfig, BaseAdapter, PythonJobHelper, + ScalaJobHelper, ConstraintSupport, ) from dbt.adapters.base.plugin import AdapterPlugin # noqa: F401 diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py index 37ab45f8292..f22518381a8 100644 --- a/core/dbt/adapters/base/impl.py +++ b/core/dbt/adapters/base/impl.py @@ -134,8 +134,10 @@ def _relation_name(rel: Optional[BaseRelation]) -> str: def log_code_execution(code_execution_function): # decorator to log code and execution time - if code_execution_function.__name__ != "submit_python_job": - raise ValueError("this should be only used to log submit_python_job now") + if code_execution_function.__name__ not in ("submit_python_job", "submit_scala_job"): + raise ValueError( + "this should be only used to log submit_python_job and submit_scala_job now" + ) def execution_with_log(*args): self = args[0] @@ -161,6 +163,14 @@ def submit(self, compiled_code: str) -> Any: raise NotImplementedError("PythonJobHelper submit function is not implemented yet") +class ScalaJobHelper: + def __init__(self, parsed_model: Dict, credential: Credentials) -> None: + raise NotImplementedError("ScalaJobHelper is not implemented yet") + + def submit(self, compiled_code: str) -> Any: + raise NotImplementedError("ScalaJobHelper submit function is not implemented yet") + + class BaseAdapter(metaclass=AdapterMeta): """The 
BaseAdapter provides an abstract base class for adapters. @@ -1287,6 +1297,37 @@ def generate_python_submission_response(self, submission_result: Any) -> Adapter "Your adapter need to implement generate_python_submission_response" ) + @property + def scala_submission_helpers(self) -> Dict[str, Type[ScalaJobHelper]]: + raise NotImplementedError("scala_submission_helpers is not specified") + + @property + def default_scala_submission_method(self) -> str: + raise NotImplementedError("default_scala_submission_method is not specified") + + @log_code_execution + def submit_scala_job(self, parsed_model: dict, compiled_code: str) -> AdapterResponse: + submission_method = parsed_model["config"].get( + "submission_method", self.default_scala_submission_method + ) + if submission_method not in self.scala_submission_helpers: + raise NotImplementedError( + "Submission method {} is not supported for current adapter".format( + submission_method + ) + ) + job_helper = self.scala_submission_helpers[submission_method]( + parsed_model, self.connections.profile.credentials + ) + submission_result = job_helper.submit(compiled_code) + # process submission result to generate adapter response + return self.generate_scala_submission_response(submission_result) + + def generate_scala_submission_response(self, submission_result: Any) -> AdapterResponse: + raise NotImplementedError( + "Your adapter need to implement generate_scala_submission_response" + ) + def valid_incremental_strategies(self): """The set of standard builtin strategies which this adapter supports out-of-the-box. Not used to validate custom strategies defined by end users. 
diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py index f42141d700a..a5bfbeca661 100644 --- a/core/dbt/compilation.py +++ b/core/dbt/compilation.py @@ -409,24 +409,29 @@ def _compile_code( if extra_context is None: extra_context = {} - if node.language == ModelLanguage.python: - context = self._create_node_context(node, manifest, extra_context) + context = self._create_node_context(node, manifest, extra_context) + + # we should NOT jinja render the python and scala model's 'raw code' + code_to_render = { + ModelLanguage.python: "{{ py_script_postfix(model) }}", + ModelLanguage.scala: "{{ scala_script_prefix(model) }}", + ModelLanguage.sql: node.raw_code, + } - postfix = jinja.get_rendered( - "{{ py_script_postfix(model) }}", + rendered_code = jinja.get_rendered( + code_to_render[node.language], context, node, ) - # we should NOT jinja render the python model's 'raw code' - node.compiled_code = f"{node.raw_code}\n\n{postfix}" + + if node.language == ModelLanguage.python: + node.compiled_code = f"{node.raw_code}\n\n{rendered_code}" + + elif node.language == ModelLanguage.scala: + node.compiled_code = f"{rendered_code}\n\n{node.raw_code}" else: - context = self._create_node_context(node, manifest, extra_context) - node.compiled_code = jinja.get_rendered( - node.raw_code, - context, - node, - ) + node.compiled_code = rendered_code node.compiled = True diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py index 996d5027c58..b409ec9c1a6 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -1328,6 +1328,19 @@ def submit_python_job(self, parsed_model: Dict, compiled_code: str) -> AdapterRe ) return self.adapter.submit_python_job(parsed_model, compiled_code) + @contextmember + def submit_scala_job(self, parsed_model: Dict, compiled_code: str) -> AdapterResponse: + # Check macro_stack and that the unique id is for a materialization macro + if not ( + self.context_macro_stack.depth == 2 + and 
self.context_macro_stack.call_stack[1] == "macro.dbt.statement" + and "materialization" in self.context_macro_stack.call_stack[0] + ): + raise DbtRuntimeError( + f"submit_scala_job is not intended to be called here, at model {parsed_model['alias']}, with macro call_stack {self.context_macro_stack.call_stack}." + ) + return self.adapter.submit_scala_job(parsed_model, compiled_code) + class MacroContext(ProviderContext): """Internally, macros can be executed like nodes, with some restrictions: diff --git a/core/dbt/contracts/files.py b/core/dbt/contracts/files.py index 955757a02f7..89606336e2c 100644 --- a/core/dbt/contracts/files.py +++ b/core/dbt/contracts/files.py @@ -115,6 +115,8 @@ def __init__(self, language) -> None: self.path_end = ".sql" elif language == "python": self.path_end = ".py" + elif language == "scala": + self.path_end = ".scala" else: raise RuntimeError(f"Invalid language for remote File {language}") self.path = f"from remote system{self.path_end}" diff --git a/core/dbt/graph/selector_spec.py b/core/dbt/graph/selector_spec.py index 31ffc050585..e40734db646 100644 --- a/core/dbt/graph/selector_spec.py +++ b/core/dbt/graph/selector_spec.py @@ -82,7 +82,7 @@ def __post_init__(self): def default_method(cls, value: str) -> MethodName: if _probably_path(value): return MethodName.Path - elif value.lower().endswith((".sql", ".py", ".csv")): + elif value.lower().endswith((".sql", ".py", ".scala", ".csv")): return MethodName.File else: return MethodName.FQN diff --git a/core/dbt/include/global_project/macros/etc/statement.sql b/core/dbt/include/global_project/macros/etc/statement.sql index 8fb98f8c811..fe8b11e8230 100644 --- a/core/dbt/include/global_project/macros/etc/statement.sql +++ b/core/dbt/include/global_project/macros/etc/statement.sql @@ -15,6 +15,10 @@ The macro override naming method (spark__statement) only works for macros which {%- set res = submit_python_job(model, compiled_code) -%} {#-- TODO: What should table be for python models? 
--#} {%- set table = None -%} + {%- elif language == 'scala' -%} + {%- set res = submit_scala_job(model, compiled_code) -%} + {#-- TODO: What should table be for scala models? --#} + {%- set table = None -%} {%- else -%} {% do exceptions.raise_compiler_error("statement macro didn't get supported language") %} {%- endif -%} diff --git a/core/dbt/include/global_project/macros/python_model/python.sql b/core/dbt/include/global_project/macros/python_model/python.sql index d658ff185b2..751f9761f82 100644 --- a/core/dbt/include/global_project/macros/python_model/python.sql +++ b/core/dbt/include/global_project/macros/python_model/python.sql @@ -1,25 +1,7 @@ -{% macro resolve_model_name(input_model_name) %} - {{ return(adapter.dispatch('resolve_model_name', 'dbt')(input_model_name)) }} -{% endmacro %} - -{%- macro default__resolve_model_name(input_model_name) -%} - {{ input_model_name | string | replace('"', '\"') }} -{%- endmacro -%} - -{% macro build_ref_function(model) %} - - {%- set ref_dict = {} -%} - {%- for _ref in model.refs -%} - {% set _ref_args = [_ref.get('package'), _ref['name']] if _ref.get('package') else [_ref['name'],] %} - {%- set resolved = ref(*_ref_args, v=_ref.get('version')) -%} - {%- if _ref.get('version') -%} - {% do _ref_args.extend(["v" ~ _ref['version']]) %} - {%- endif -%} - {%- do ref_dict.update({_ref_args | join('.'): resolve_model_name(resolved)}) -%} - {%- endfor -%} +{% macro build_py_ref_function(model) %} def ref(*args, **kwargs): - refs = {{ ref_dict | tojson }} + refs = {{ build_ref_dict(model) | tojson }} key = '.'.join(args) version = kwargs.get("v") or kwargs.get("version") if version: @@ -29,33 +11,16 @@ def ref(*args, **kwargs): {% endmacro %} -{% macro build_source_function(model) %} - - {%- set source_dict = {} -%} - {%- for _source in model.sources -%} - {%- set resolved = source(*_source) -%} - {%- do source_dict.update({_source | join('.'): resolve_model_name(resolved)}) -%} - {%- endfor -%} +{% macro 
build_py_source_function(model) %} def source(*args, dbt_load_df_function): - sources = {{ source_dict | tojson }} + sources = {{ build_source_dict(model) | tojson }} key = '.'.join(args) return dbt_load_df_function(sources[key]) - {% endmacro %} -{% macro build_config_dict(model) %} - {%- set config_dict = {} -%} - {% set config_dbt_used = zip(model.config.config_keys_used, model.config.config_keys_defaults) | list %} - {%- for key, default in config_dbt_used -%} - {# weird type testing with enum, would be much easier to write this logic in Python! #} - {%- if key == "language" -%} - {%- set value = "python" -%} - {%- endif -%} - {%- set value = model.config.get(key, default) -%} - {%- do config_dict.update({key: value}) -%} - {%- endfor -%} -config_dict = {{ config_dict }} +{% macro build_py_config_dict(model) %} +config_dict = {{ build_config_dict(model) }} {% endmacro %} {% macro py_script_postfix(model) %} @@ -64,9 +29,9 @@ config_dict = {{ config_dict }} # COMMAND ---------- # this part is dbt logic for get ref work, do not modify -{{ build_ref_function(model ) }} -{{ build_source_function(model ) }} -{{ build_config_dict(model) }} +{{ build_py_ref_function(model ) }} +{{ build_py_source_function(model ) }} +{{ build_py_config_dict(model) }} class config: def __init__(self, *args, **kwargs): diff --git a/core/dbt/include/global_project/macros/scala_model/scala.sql b/core/dbt/include/global_project/macros/scala_model/scala.sql new file mode 100644 index 00000000000..00e2a362975 --- /dev/null +++ b/core/dbt/include/global_project/macros/scala_model/scala.sql @@ -0,0 +1,51 @@ +{% macro scala_script_prefix(model) %} +// this part is dbt logic for get ref work, do not modify +// you will need to copy the next section to run the code + +import org.apache.spark.sql.{DataFrame, SparkSession} +import sqlContext.implicits.StringToColumn +import org.json4s.jackson.JsonMethods.{parse => parse_json} + + +case class configObj(options: Map[String, Any]) +object configObj { 
+ val cfg = parse_json("""{{ build_config_dict(model) | tojson }}""").values.asInstanceOf[Map[String, String]] + def get = cfg.getOrElse _ + def get(key: String): String = { + val v = cfg.get(key) + if (v.isDefined) v.get else None.toString + } +} + + +class dbtObj(load_df_function: String => DataFrame) { + val is_incremental = {{ is_incremental() | lower }} + val database = "{{ this.database }}" + val schema = "{{ this.schema }}" + val identifier = "{{ this.identifier | lower }}" + {% set this_relation_name = resolve_model_name(this) %} + override def toString = "{{ this_relation_name }}" + val source_map = parse_json("""{{ build_source_dict(model) | tojson }}""").values.asInstanceOf[Map[String, String]] + val ref_map = parse_json("""{{ build_ref_dict(model) | tojson }}""").values.asInstanceOf[Map[String, String]] + val config = configObj + + def source(args: String*): DataFrame = { + val key = args.mkString(".") + load_df_function(source_map(key)) + } + + def ref(args: String*): DataFrame = { + val key = args.mkString(".") + load_df_function(ref_map(key)) + } +} + +// COMMAND ---------- +{{scala_script_comment()}} +// COMMAND ---------- +// This part is user provided model code +{% endmacro %} + +{#-- entry point for add instuctions for running compiled_code --#} +{%macro scala_script_comment()%} +{%endmacro%} \ No newline at end of file diff --git a/core/dbt/include/global_project/macros/utils/language_models_utils.sql b/core/dbt/include/global_project/macros/utils/language_models_utils.sql new file mode 100644 index 00000000000..c6bea609b53 --- /dev/null +++ b/core/dbt/include/global_project/macros/utils/language_models_utils.sql @@ -0,0 +1,43 @@ +{% macro resolve_model_name(input_model_name) %} + {{ return(adapter.dispatch('resolve_model_name', 'dbt')(input_model_name)) }} +{% endmacro %} + +{%- macro default__resolve_model_name(input_model_name) -%} + {{ input_model_name | string | replace('"', '\"') }} +{%- endmacro -%} + +{% macro build_ref_dict(model) %} + 
{%- set ref_dict = {} -%} + {%- for _ref in model.refs -%} + {% set _ref_args = [_ref.get('package'), _ref['name']] if _ref.get('package') else [_ref['name'],] %} + {%- set resolved = ref(*_ref_args, v=_ref.get('version')) -%} + {%- if _ref.get('version') -%} + {% do _ref_args.extend(["v" ~ _ref['version']]) %} + {%- endif -%} + {%- do ref_dict.update({_ref_args | join('.'): resolve_model_name(resolved)}) -%} + {%- endfor -%} + {{ return(ref_dict) }} +{% endmacro %} + +{% macro build_source_dict(model) %} + {%- set source_dict = {} -%} + {%- for _source in model.sources -%} + {%- set resolved = source(*_source) -%} + {%- do source_dict.update({_source | join('.'): resolve_model_name(resolved)}) -%} + {%- endfor -%} + {{ return(source_dict) }} +{% endmacro %} + +{% macro build_config_dict(model) %} + {%- set config_dict = {} -%} + {% set config_dbt_used = zip(model.config.config_keys_used, model.config.config_keys_defaults) | list %} + {%- for key, default in config_dbt_used -%} + {# weird type testing with enum, would be much easier to write this logic in Python! 
#} + {%- if key == "language" -%} + {%- set value = model.language -%} + {%- endif -%} + {%- set value = model.config.get(key, default) -%} + {%- do config_dict.update({key: value}) -%} + {%- endfor -%} + {{ return(config_dict) }} +{% endmacro %} \ No newline at end of file diff --git a/core/dbt/node_types.py b/core/dbt/node_types.py index b2825b29e09..17c6df896ce 100644 --- a/core/dbt/node_types.py +++ b/core/dbt/node_types.py @@ -89,4 +89,5 @@ class RunHookType(StrEnum): class ModelLanguage(StrEnum): python = "python" + scala = "scala" sql = "sql" diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py index dde4aa5035c..db675ee2c96 100644 --- a/core/dbt/parser/base.py +++ b/core/dbt/parser/base.py @@ -220,6 +220,9 @@ def _create_parsetime_node( if block.path.relative_path.endswith(".py"): language = ModelLanguage.python config.add_config_call({"materialized": "table"}) + elif block.path.relative_path.endswith(".scala"): + language = ModelLanguage.scala + config.add_config_call({"materialized": "table"}) else: # this is not ideal but we have a lot of tests to adjust if don't do it language = ModelLanguage.sql diff --git a/core/dbt/parser/models.py b/core/dbt/parser/models.py index bff46e41c9f..1bbd0625cf6 100644 --- a/core/dbt/parser/models.py +++ b/core/dbt/parser/models.py @@ -28,6 +28,12 @@ UndefinedMacroError, ) +# New for Scala models +from pygments import lex +from pygments.lexers import ScalaLexer +from pygments.token import Token + + dbt_function_key_words = set(["ref", "source", "config", "get"]) dbt_function_full_names = set(["dbt.ref", "dbt.source", "dbt.config", "dbt.config.get"]) @@ -166,6 +172,20 @@ def verify_python_model_code(node): raise ParsingError("No jinja in python model code is allowed", node=node) +def verify_scala_model_code(node): + # TODO: add a test for this + try: + rendered_scala = get_rendered( + node.raw_code, + {}, + node, + ) + if rendered_scala != node.raw_code: + raise ParsingError("") + except (UndefinedMacroError, 
ParsingError): + raise ParsingError("No jinja in scala model code is allowed", node=node) + + class ModelParser(SimpleSQLParser[ModelNode]): def parse_from_dict(self, dct, validate=True) -> ModelNode: if validate: @@ -228,6 +248,168 @@ def parse_python_model(self, node, config, context): config_keys_defaults=config_keys_defaults, ) + def parse_scala_model(self, node, config, context): + lexer = ScalaLexer() + tokens = list(lex(node.raw_code, lexer)) + + in_import_statement = False + packages_imported = [] + current_import = None + in_function_def = False + function_defs = [] + current_function_def = None + in_dbt_function_call = False + in_dbt_function_naming = False + in_dbt_function_arguments = False + dbt_function_calls = [] + current_dbt_function = None + current_dbt_arguments = None + open_parenthesis = 0 + current_level = open_parenthesis + + for token_type, token_value in tokens: + # identify imports + if token_type is Token.Keyword and token_value == 'import': + in_import_statement = True + current_import = [] + continue + + if in_import_statement: + if token_type is Token.Name.Namespace: + current_import.append(token_value) + elif token_type is Token.Text.Whitespace and '\n' in token_value: + in_import_statement = False + packages_imported.append(".".join(current_import)) + elif token_type is Token.Punctuation and token_value == ',': + in_import_statement = False + packages_imported.append(".".join(current_import)) + continue + + # Find all function definitions + if token_type is Token.Name.Function: + current_function_def = {'name': token_value, 'arguments': []} + in_function_def = True + continue + + if in_function_def: + if token_type is Token.Punctuation and token_value == "(": + open_parenthesis += 1 + elif token_type is Token.Name: + current_function_def['arguments'].append(token_value) + elif token_type is Token.Punctuation and token_value == ")": + open_parenthesis -= 1 + in_function_def = False + function_defs.append(current_function_def) + elif 
token_type is Token.Operator and token_value == "=": + in_function_def = False + function_defs.append(current_function_def) + continue + + # identify dbt function calls + if token_type is Token.Name and token_value == "dbt": + in_dbt_function_call = True + in_dbt_function_naming = True + in_dbt_function_arguments = False + current_dbt_function = [] + current_dbt_arguments = [] + current_level = open_parenthesis + continue + + if in_dbt_function_call: + if token_type is Token.Name: + if in_dbt_function_naming: + current_dbt_function.append(token_value) + if in_dbt_function_arguments: + # ensure function calls are made using literals only + raise ParsingError( + "In dbt scala model, `dbt.ref`, `dbt.source`, `dbt.config`, `dbt.config.get` function args only support Scala literal structures", + node=node + ) + elif token_type is Token.Punctuation and token_value == '(': + open_parenthesis += 1 + if in_dbt_function_naming: + in_dbt_function_naming = False + in_dbt_function_arguments = True + elif token_type is Token.Literal.String and in_dbt_function_arguments: + # strings -> remove quote marks + current_dbt_arguments.append(token_value[1:-1]) + elif token_type is Token.Keyword.Constant and in_dbt_function_arguments: + # booleans + current_dbt_arguments.append(token_value.capitalize()) + elif token_type in Token.Literal and in_dbt_function_arguments: + # all others + current_dbt_arguments.append(token_value) + elif token_type is Token.Punctuation and token_value == ')': + open_parenthesis -= 1 + if open_parenthesis == current_level: + in_dbt_function_arguments = False + in_dbt_function_call = False + dbt_function_calls.append(('.'.join(current_dbt_function), current_dbt_arguments)) + continue + + # identify that there's one and only one function "model" with "dbt" as first argument and up to two arguments total + model_functions = [f for f in function_defs if f['name'] == 'model'] + if len(model_functions) != 1: + raise ParsingError( + f"dbt allows exactly one model 
defined per scala file, found {len(model_functions)}", + node=node, + ) + + model = model_functions[0] + model_errors = [] + if len(model['arguments']) and not model['arguments'][0] == "dbt": + model_errors.append("'dbt' not provided for model as the first argument") + if len(model['arguments']) != 3: + # dbtObj is recognized as a Token.Name, therefore included in the current way of selecting the arguments + model_errors.append( + "model function should have two args, `dbt` of type `dbtObj` and a session to current warehouse of type `SparkSession`" + ) + + if len(model_errors): + raise ParsingError("\n".join(model_errors), node=node) + + # check that get is consistent (1 or 2 arguments) + # adjust config Scala map to Python dict + # add function calls to the context map + # list config keys used and their defaults and add it to the context map + config_keys_used = [] + config_keys_defaults = [] + + for (func, args) in dbt_function_calls: + kwargs = {} + if func == "config.get": + func = "get" + num_args = len(args) + if num_args == 0: + raise ParsingError( + "dbt.config.get() requires at least one argument", + node=node, + ) + if num_args > 2: + raise ParsingError( + f"dbt.config.get() takes at most 2 arguments ({num_args} given)", + node=node, + ) + key = args[0] + default_value = args[1] if num_args == 2 else None + config_keys_used.append(key) + config_keys_defaults.append(default_value) + continue + + if func == "config": + args_it = iter(args) + kwargs = dict(zip(args_it, args_it)) + args = [] + + context[func](*args, **kwargs) + + if config_keys_used: + # this is being used in macro build_config_dict + context["config"]( + config_keys_used=config_keys_used, + config_keys_defaults=config_keys_defaults, + ) + def render_update(self, node: ModelNode, config: ContextConfig) -> None: self.manifest._parsing_info.static_analysis_path_count += 1 flags = get_flags() @@ -242,6 +424,18 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None: # we got 
a ValidationError - probably bad types in config() raise ModelConfigError(exc, node=node) from exc return + + elif node.language == ModelLanguage.scala: + try: + verify_scala_model_code(node) + context = self._context_for(node, config) + self.parse_scala_model(node, config, context) + self.update_parsed_node_config(node, config, context=context) + except ValidationError as exc: + # we got a ValidationError - probably bad types in config() + raise ModelConfigError(exc, node=node) from exc + return + elif not flags.STATIC_PARSER: # jinja rendering diff --git a/core/dbt/parser/read_files.py b/core/dbt/parser/read_files.py index d07c7fb1d48..9eee3af1996 100644 --- a/core/dbt/parser/read_files.py +++ b/core/dbt/parser/read_files.py @@ -384,7 +384,7 @@ def get_file_types_for_project(project): }, ParseFileType.Model: { "paths": project.model_paths, - "extensions": [".sql", ".py"], + "extensions": [".sql", ".py", ".scala"], "parser": "ModelParser", }, ParseFileType.Snapshot: { diff --git a/core/dbt/tests/fixtures/project.py b/core/dbt/tests/fixtures/project.py index 1b7ef899bd0..06d1a6e7cb8 100644 --- a/core/dbt/tests/fixtures/project.py +++ b/core/dbt/tests/fixtures/project.py @@ -298,7 +298,7 @@ def write_project_files(project_root, dir_name, file_dict): def write_project_files_recursively(path, file_dict): if type(file_dict) is not dict: raise TestProcessingException(f"File dict is not a dict: '{file_dict}' for path '{path}'") - suffix_list = [".sql", ".csv", ".md", ".txt", ".py"] + suffix_list = [".sql", ".csv", ".md", ".txt", ".py", ".scala"] for name, value in file_dict.items(): if name.endswith(".yml") or name.endswith(".yaml"): if isinstance(value, str): diff --git a/core/setup.py b/core/setup.py index 5179e5a8fd1..c867932aad4 100644 --- a/core/setup.py +++ b/core/setup.py @@ -61,6 +61,7 @@ # with major versions in each new minor version of dbt-core. "click<9", "networkx>=2.3,<4", + "Pygments>=2,<3", # ---- # These packages are major-version-0. 
Keep upper bounds on upcoming minor versions (which could have breaking changes) # and check compatibility / bump in each new minor version of dbt-core.