Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .changes/unreleased/Under the Hood-20230628-142006.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
kind: Under the Hood
body: Populate metric input measures
time: 2023-06-28T14:20:06.022738-07:00
custom:
Author: QMalcolm
Issue: "7884"
70 changes: 64 additions & 6 deletions core/dbt/parser/manifest.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,8 @@
from dbt.dataclass_schema import StrEnum, dbtClassMixin
from dbt.plugins import get_plugin_manager

from dbt_semantic_interfaces.enum_extension import assert_values_exhausted
from dbt_semantic_interfaces.type_enums import MetricType

PARSING_STATE = DbtProcessState("parsing")
PERF_INFO_FILE_NAME = "perf_info.json"
Expand Down Expand Up @@ -1064,16 +1066,15 @@ def process_refs(self, current_project: str, dependencies: Optional[Dict[str, Pr
# node, and updates 'depends_on.nodes' with the unique id
def process_metrics(self, config: RuntimeConfig):
current_project = config.project_name
for node in self.manifest.nodes.values():
if node.created_at < self.started_at:
continue
_process_metrics_for_node(self.manifest, current_project, node)
for metric in self.manifest.metrics.values():
# TODO: Can we do this if the metric is derived & depends on
# some other metric for its definition? Maybe....
if metric.created_at < self.started_at:
continue
_process_metric_node(self.manifest, current_project, metric)
_process_metrics_for_node(self.manifest, current_project, metric)
for node in self.manifest.nodes.values():
if node.created_at < self.started_at:
continue
_process_metrics_for_node(self.manifest, current_project, node)
for exposure in self.manifest.exposures.values():
if exposure.created_at < self.started_at:
continue
Expand Down Expand Up @@ -1439,6 +1440,63 @@ def _process_refs(
node.depends_on.add_node(target_model_id)


def _process_metric_node(
    manifest: Manifest,
    current_project: str,
    metric: Metric,
    visited: Optional[set] = None,
) -> None:
    """Recursively populate a metric's ``type_params.input_measures``.

    Simple/cumulative metrics contribute their own measure; derived/ratio
    metrics inherit the input measures of the metrics they reference, which
    are resolved and processed recursively.

    :param manifest: The manifest used to resolve referenced metrics.
    :param current_project: Name of the project currently being parsed.
    :param metric: The metric whose input measures are being populated.
    :param visited: Metric names already on the current recursion path; used
        to detect cyclic metric definitions. Callers normally omit this.
    :raises dbt.exceptions.ParsingError: If a ratio metric is missing its
        numerator/denominator, a referenced metric is missing or disabled,
        or the metric participates in a cyclic definition.
    """

    # This ensures that if this metrics input_measures have already been set
    # we skip the work. This could happen either due to recursion or if multiple
    # metrics derive from another given metric.
    if len(metric.type_params.input_measures) > 0:
        return

    # Guard against cyclic metric definitions (e.g. a derived metric that,
    # directly or transitively, references itself). Without this the
    # recursion below would never terminate.
    if visited is None:
        visited = set()
    if metric.name in visited:
        raise dbt.exceptions.ParsingError(
            f"The metric `{metric.name}` is part of a cyclic metric definition and cannot be resolved.",
            node=metric,
        )
    visited.add(metric.name)

    # NOTE: individual `is` comparisons (rather than `in [...]`) are required
    # for assert_values_exhausted's type-narrowing to prove exhaustiveness.
    if metric.type is MetricType.SIMPLE or metric.type is MetricType.CUMULATIVE:
        assert (
            metric.type_params.measure is not None
        ), f"{metric} should have a measure defined, but it does not."
        metric.type_params.input_measures.append(metric.type_params.measure)

    elif metric.type is MetricType.DERIVED or metric.type is MetricType.RATIO:
        input_metrics = metric.input_metrics
        if metric.type is MetricType.RATIO:
            if metric.type_params.numerator is None or metric.type_params.denominator is None:
                raise dbt.exceptions.ParsingError(
                    "Invalid ratio metric. Both a numerator and denominator must be specified",
                    node=metric,
                )
            input_metrics = [metric.type_params.numerator, metric.type_params.denominator]

        for input_metric in input_metrics:
            target_metric = manifest.resolve_metric(
                target_metric_name=input_metric.name,
                target_metric_package=None,
                current_project=current_project,
                node_package=metric.package_name,
            )

            if target_metric is None:
                raise dbt.exceptions.ParsingError(
                    f"The metric `{input_metric.name}` does not exist but was referenced.",
                    node=metric,
                )
            elif isinstance(target_metric, Disabled):
                raise dbt.exceptions.ParsingError(
                    f"The metric `{input_metric.name}` is disabled and thus cannot be referenced.",
                    node=metric,
                )

            _process_metric_node(
                manifest=manifest,
                current_project=current_project,
                metric=target_metric,
                visited=visited,
            )
            metric.type_params.input_measures.extend(target_metric.type_params.input_measures)
    else:
        assert_values_exhausted(metric.type)

def _process_metrics_for_node(
manifest: Manifest,
current_project: str,
Expand Down
24 changes: 20 additions & 4 deletions tests/functional/metrics/fixtures.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,10 +50,8 @@
description: "The average tenure per person"
type: ratio
type_params:
numerator:
name: years_tenure
denominator:
name: people
numerator: collective_tenure
denominator: number_of_people

- name: average_tenure_plus_one
label: "Average tenure, plus 1"
Expand Down Expand Up @@ -98,6 +96,24 @@
filter: "{{dimension('loves_dbt')}} is true"
window: 14 days

- name: average_tenure
label: Average Tenure
description: The average tenure of our people
type: ratio
type_params:
numerator: collective_tenure
denominator: number_of_people

- name: average_tenure_minus_people
label: Average Tenure minus People
description: Well this isn't really useful is it?
type: derived
type_params:
expr: average_tenure - number_of_people
metrics:
- average_tenure
- number_of_people

"""

invalid_models_people_metrics_yml = """
Expand Down
48 changes: 43 additions & 5 deletions tests/functional/metrics/test_metric_configs.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import pytest
from hologram import ValidationError
from dbt.contracts.graph.model_config import MetricConfig
from dbt.exceptions import CompilationError
from dbt.exceptions import CompilationError, ParsingError
from dbt.tests.util import run_dbt, update_config_file, get_manifest


Expand Down Expand Up @@ -36,7 +36,7 @@ def models(self):
def project_config_update(self):
return {
"metrics": {
"number_of_people": {
"average_tenure_minus_people": {
"enabled": True,
},
}
Expand All @@ -45,12 +45,12 @@ def project_config_update(self):
def test_enabled_metric_config_dbt_project(self, project):
run_dbt(["parse"])
manifest = get_manifest(project.project_root)
assert "metric.test.number_of_people" in manifest.metrics
assert "metric.test.average_tenure_minus_people" in manifest.metrics

new_enabled_config = {
"metrics": {
"test": {
"number_of_people": {
"average_tenure_minus_people": {
"enabled": False,
},
}
Expand All @@ -59,7 +59,7 @@ def test_enabled_metric_config_dbt_project(self, project):
update_config_file(new_enabled_config, project.project_root, "dbt_project.yml")
run_dbt(["parse"])
manifest = get_manifest(project.project_root)
assert "metric.test.number_of_people" not in manifest.metrics
assert "metric.test.average_tenure_minus_people" not in manifest.metrics
assert "metric.test.collective_tenure" in manifest.metrics


Expand Down Expand Up @@ -122,13 +122,21 @@ def test_disabled_metric_ref_model(self, project):
assert "metric.test.number_of_people" in manifest.metrics
assert "metric.test.collective_tenure" in manifest.metrics
assert "model.test.people_metrics" in manifest.nodes
assert "metric.test.average_tenure" in manifest.metrics
assert "metric.test.average_tenure_minus_people" in manifest.metrics

new_enabled_config = {
"metrics": {
"test": {
"number_of_people": {
"enabled": False,
},
"average_tenure_minus_people": {
"enabled": False,
},
"average_tenure": {
"enabled": False,
},
}
}
}
Expand All @@ -152,3 +160,33 @@ def test_invalid_config_metric(self, project):
run_dbt(["parse"])
expected_msg = "'True and False' is not of type 'boolean'"
assert expected_msg in str(excinfo.value)


class TestDisabledMetric(MetricConfigTests):
    """Disabling a metric that other (still-enabled) metrics depend on
    must fail parsing with a clear error, rather than silently producing
    a broken manifest."""

    @pytest.fixture(scope="class")
    def models(self):
        # One model plus a schema file whose metrics include derived/ratio
        # metrics that reference `number_of_people`.
        return {
            "people.sql": models_people_sql,
            "schema.yml": models_people_metrics_yml,
        }

    def test_disabling_upstream_metric_errors(self, project):
        run_dbt(["parse"])  # shouldn't error out yet

        # Disable only the upstream metric; its dependent metrics remain
        # enabled and will now reference a disabled metric.
        new_enabled_config = {
            "metrics": {
                "test": {
                    "number_of_people": {
                        "enabled": False,
                    },
                }
            }
        }

        update_config_file(new_enabled_config, project.project_root, "dbt_project.yml")
        # Re-parsing should now fail while resolving the dependent metrics.
        with pytest.raises(ParsingError) as excinfo:
            run_dbt(["parse"])
        expected_msg = (
            "The metric `number_of_people` is disabled and thus cannot be referenced."
        )
        assert expected_msg in str(excinfo.value)
32 changes: 28 additions & 4 deletions tests/functional/metrics/test_metrics.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
import pytest

from dbt.tests.util import run_dbt, get_manifest
from dbt.cli.main import dbtRunner
from dbt.contracts.graph.manifest import Manifest
from dbt.exceptions import ParsingError
from dbt.tests.util import run_dbt, get_manifest


from tests.functional.metrics.fixtures import (
Expand Down Expand Up @@ -35,18 +37,40 @@ def test_simple_metric(
self,
project,
):
# initial run
results = run_dbt(["run"])
assert len(results) == 1
runner = dbtRunner()
result = runner.invoke(["parse"])
assert result.success
assert isinstance(result.result, Manifest)
manifest = get_manifest(project.project_root)
metric_ids = list(manifest.metrics.keys())
expected_metric_ids = [
"metric.test.number_of_people",
"metric.test.collective_tenure",
"metric.test.collective_window",
"metric.test.average_tenure",
"metric.test.average_tenure_minus_people",
]
assert metric_ids == expected_metric_ids

assert (
len(manifest.metrics["metric.test.number_of_people"].type_params.input_measures) == 1
)
assert (
len(manifest.metrics["metric.test.collective_tenure"].type_params.input_measures) == 1
)
assert (
len(manifest.metrics["metric.test.collective_window"].type_params.input_measures) == 1
)
assert len(manifest.metrics["metric.test.average_tenure"].type_params.input_measures) == 2
assert (
len(
manifest.metrics[
"metric.test.average_tenure_minus_people"
].type_params.input_measures
)
== 3
)


class TestInvalidRefMetrics:
@pytest.fixture(scope="class")
Expand Down