
Commit 0814eee

update samples from Release-167 as a part of SDK release
1 parent f45b815 commit 0814eee

9 files changed: +165 −330 lines changed

how-to-use-azureml/automated-machine-learning/automl_env.yml

Lines changed: 1 addition & 0 deletions
@@ -21,6 +21,7 @@ dependencies:
   - markupsafe<2.1.0
   - tqdm==4.64.1
   - jsonschema==4.16.0
+  - websocket-client==1.4.1

   - pip:
     # Required packages for AzureML execution, history, and data preparation.

how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/assets/retrain_models.py

Lines changed: 19 additions & 14 deletions
@@ -43,18 +43,28 @@ def init():
     global output_dir
     global automl_settings
     global model_uid
+    global forecast_quantiles
+
     logger.info("Initialization of the run.")
     parser = argparse.ArgumentParser("Parsing input arguments.")
     parser.add_argument("--output-dir", dest="out", required=True)
     parser.add_argument("--model-name", dest="model", default=None)
     parser.add_argument("--model-uid", dest="model_uid", default=None)
+    parser.add_argument(
+        "--forecast_quantiles",
+        nargs="*",
+        type=float,
+        help="forecast quantiles list",
+        default=None,
+    )

     parsed_args, _ = parser.parse_known_args()
     model_name = parsed_args.model
     automl_settings = _get_automl_settings()
     target_column_name = automl_settings.get("label_column_name")
     output_dir = parsed_args.out
     model_uid = parsed_args.model_uid
+    forecast_quantiles = parsed_args.forecast_quantiles
     os.makedirs(output_dir, exist_ok=True)
     os.environ["AUTOML_IGNORE_PACKAGE_VERSION_INCOMPATIBILITIES".lower()] = "True"

@@ -126,23 +136,18 @@ def run_backtest(data_input_name: str, file_name: str, experiment: Experiment):
     )
     print(f"The model {best_run.properties['model_name']} was registered.")

-    _, x_pred = fitted_model.forecast(X_test)
-    x_pred.reset_index(inplace=True, drop=False)
-    columns = [automl_settings[constants.TimeSeries.TIME_COLUMN_NAME]]
-    if automl_settings.get(constants.TimeSeries.GRAIN_COLUMN_NAMES):
-        # We know that fitted_model.grain_column_names is a list.
-        columns.extend(fitted_model.grain_column_names)
-    columns.append(constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN)
-    # Remove featurized columns.
-    x_pred = x_pred[columns]
-    x_pred.rename(
-        {constants.TimeSeriesInternal.DUMMY_TARGET_COLUMN: "predicted_level"},
-        axis=1,
-        inplace=True,
-    )
+    # By default we will have forecast quantiles of 0.5, which is our target
+    if forecast_quantiles:
+        if 0.5 not in forecast_quantiles:
+            forecast_quantiles.append(0.5)
+        fitted_model.quantiles = forecast_quantiles
+
+    x_pred = fitted_model.forecast_quantiles(X_test)
     x_pred["actual_level"] = y_test
     x_pred["backtest_iteration"] = f"iteration_{last_training_date}"
+    x_pred.rename({0.5: "predicted_level"}, axis=1, inplace=True)
     date_safe = RE_INVALID_SYMBOLS.sub("_", last_training_date)
+
     x_pred.to_csv(os.path.join(output_dir, f"iteration_{date_safe}.csv"), index=False)
     return x_pred
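With this change, the backtest script switches from point forecasts to quantile forecasts: when --forecast_quantiles is supplied, it pins the model's quantiles (always including the 0.5 median) and calls forecast_quantiles, which yields one column per quantile. Below is a minimal runnable sketch of that handling; StubModel is a hypothetical stand-in for the fitted AutoML forecaster, not the SDK class.

import pandas as pd


class StubModel:
    """Hypothetical stand-in for the fitted AutoML forecaster."""

    def __init__(self):
        self.quantiles = [0.5]

    def forecast_quantiles(self, X):
        # Mimic the SDK output shape: one column per requested quantile
        # (time and grain columns omitted in this stub).
        return pd.DataFrame({q: [float(q)] * len(X) for q in self.quantiles})


forecast_quantiles = [0.025, 0.975]  # as parsed from --forecast_quantiles
fitted_model = StubModel()

# Ensure the median is present so the "predicted_level" column exists downstream.
if forecast_quantiles:
    if 0.5 not in forecast_quantiles:
        forecast_quantiles.append(0.5)
    fitted_model.quantiles = forecast_quantiles

X_test = pd.DataFrame({"date": pd.date_range("2012-09-01", periods=3, freq="D")})
x_pred = fitted_model.forecast_quantiles(X_test)
x_pred.rename({0.5: "predicted_level"}, axis=1, inplace=True)
print(list(x_pred.columns))  # [0.025, 0.975, 'predicted_level']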

how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb

Lines changed: 2 additions & 0 deletions
@@ -365,6 +365,7 @@
     "    step_size=BACKTESTING_PERIOD,\n",
     "    step_number=NUMBER_OF_BACKTESTS,\n",
     "    model_uid=model_uid,\n",
+    "    forecast_quantiles=[0.025, 0.975],  # Optional\n",
     ")"
    ]
   },
@@ -590,6 +591,7 @@
     "    step_size=BACKTESTING_PERIOD,\n",
     "    step_number=NUMBER_OF_BACKTESTS,\n",
     "    model_name=model_name,\n",
+    "    forecast_quantiles=[0.025, 0.975],\n",
     ")"
    ]
   },

how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/pipeline_helper.py

Lines changed: 5 additions & 0 deletions
@@ -31,6 +31,7 @@ def get_backtest_pipeline(
     step_number: int,
     model_name: Optional[str] = None,
     model_uid: Optional[str] = None,
+    forecast_quantiles: Optional[list] = None,
 ) -> Pipeline:
     """
     :param experiment: The experiment used to run the pipeline.
@@ -44,6 +45,7 @@ def get_backtest_pipeline(
     :param step_size: The number of periods to step back in backtesting.
     :param step_number: The number of backtesting iterations.
     :param model_uid: The uid to mark models from this run of the experiment.
+    :param forecast_quantiles: The forecast quantiles that are required in the inference.
     :return: The pipeline to be used for model retraining.
     **Note:** The output will be uploaded in the pipeline output
     called 'score'.
@@ -135,6 +137,9 @@ def get_backtest_pipeline(
     if model_uid is not None:
         prs_args.append("--model-uid")
         prs_args.append(model_uid)
+    if forecast_quantiles:
+        prs_args.append("--forecast_quantiles")
+        prs_args.extend(forecast_quantiles)
     backtest_prs = ParallelRunStep(
         name=parallel_step_name,
         parallel_run_config=back_test_config,
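Note how the quantiles round-trip between the two files above: get_backtest_pipeline appends them to the ParallelRunStep argument list, and retrain_models.py parses them back with nargs="*" and type=float. A standalone sketch of that round trip follows; the hard-coded values are illustrative, and the str() conversion stands in for the stringification that happens when arguments are passed to the step.

import argparse

# Pipeline side: mirror of the prs_args construction above.
prs_args = ["--model-uid", "uid123"]
forecast_quantiles = [0.025, 0.975]
if forecast_quantiles:
    prs_args.append("--forecast_quantiles")
    prs_args.extend(forecast_quantiles)

# Worker side: the argparse definition added in retrain_models.py.
parser = argparse.ArgumentParser("Parsing input arguments.")
parser.add_argument("--model-uid", dest="model_uid", default=None)
parser.add_argument(
    "--forecast_quantiles",
    nargs="*",
    type=float,
    help="forecast quantiles list",
    default=None,
)
parsed, _ = parser.parse_known_args([str(a) for a in prs_args])
print(parsed.forecast_quantiles)  # [0.025, 0.975]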

how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb

Lines changed: 72 additions & 46 deletions
@@ -575,7 +575,32 @@
    "outputs": [],
    "source": [
     "remote_run.download_file(\"outputs/predictions.csv\", \"predictions.csv\")\n",
-    "df_all = pd.read_csv(\"predictions.csv\")"
+    "fcst_df = pd.read_csv(\"predictions.csv\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the rolling forecast can contain multiple predictions for each date, each from a different forecast origin. For example, consider 2012-09-05:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fcst_df[fcst_df.date == \"2012-09-05\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Here, the forecast origin refers to the latest date of actuals available for a given forecast. The earliest origin in the rolling forecast, 2012-08-31, is the last day in the training data. For origin date 2012-09-01, the forecasts use actual recorded counts from the training data *and* the actual count recorded on 2012-09-01. Note that the model is not retrained for origin dates later than 2012-08-31, but the values for model features, such as lagged values of daily count, are updated.\n",
+    "\n",
+    "Let's calculate the metrics over all rolling forecasts:"
    ]
   },
   {
@@ -587,29 +612,17 @@
    "from azureml.automl.core.shared import constants\n",
    "from azureml.automl.runtime.shared.score import scoring\n",
    "from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
-    "from matplotlib import pyplot as plt\n",
    "\n",
    "# use automl metrics module\n",
    "scores = scoring.score_regression(\n",
-    "    y_test=df_all[target_column_name],\n",
-    "    y_pred=df_all[\"predicted\"],\n",
+    "    y_test=fcst_df[target_column_name],\n",
+    "    y_pred=fcst_df[\"predicted\"],\n",
    "    metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
    ")\n",
    "\n",
    "print(\"[Test data scores]\\n\")\n",
    "for key, value in scores.items():\n",
-    "    print(\"{}: {:.3f}\".format(key, value))\n",
-    "\n",
-    "# Plot outputs\n",
-    "%matplotlib inline\n",
-    "test_pred = plt.scatter(df_all[target_column_name], df_all[\"predicted\"], color=\"b\")\n",
-    "test_test = plt.scatter(\n",
-    "    df_all[target_column_name], df_all[target_column_name], color=\"g\"\n",
-    ")\n",
-    "plt.legend(\n",
-    "    (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
-    ")\n",
-    "plt.show()"
+    "    print(\"{}: {:.3f}\".format(key, value))"
   ]
  },
 {
@@ -618,8 +631,15 @@
   "source": [
    "For more details on what metrics are included and how they are calculated, please refer to [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics). You could also calculate residuals, like described [here](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals).\n",
    "\n",
-    "\n",
-    "Since we did a rolling evaluation on the test set, we can analyze the predictions by their forecast horizon relative to the rolling origin. The model was initially trained at a forecast horizon of 14, so each prediction from the model is associated with a horizon value from 1 to 14. The horizon values are in a column named, \"horizon_origin,\" in the prediction set. For example, we can calculate some of the error metrics grouped by the horizon:"
+    "The rolling forecast metric values are very high in comparison to the validation metrics reported by the AutoML job. What's going on here? We will investigate in the following cells!"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Forecast versus actuals plot\n",
+    "We will plot predictions and actuals on a time series plot. Since there are many forecasts for each date, we select the 14-day-ahead forecast from each forecast origin for our comparison."
   ]
  },
 {
@@ -628,26 +648,36 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "from metrics_helper import MAPE, APE\n",
-    "\n",
-    "df_all.groupby(\"horizon_origin\").apply(\n",
-    "    lambda df: pd.Series(\n",
-    "        {\n",
-    "            \"MAPE\": MAPE(df[target_column_name], df[\"predicted\"]),\n",
-    "            \"RMSE\": np.sqrt(\n",
-    "                mean_squared_error(df[target_column_name], df[\"predicted\"])\n",
-    "            ),\n",
-    "            \"MAE\": mean_absolute_error(df[target_column_name], df[\"predicted\"]),\n",
-    "        }\n",
-    "    )\n",
-    ")"
+    "from matplotlib import pyplot as plt\n",
+    "\n",
+    "%matplotlib inline\n",
+    "\n",
+    "fcst_df_h14 = (\n",
+    "    fcst_df.groupby(\"forecast_origin\", as_index=False)\n",
+    "    .last()\n",
+    "    .drop(columns=[\"forecast_origin\"])\n",
+    ")\n",
+    "fcst_df_h14.set_index(time_column_name, inplace=True)\n",
+    "plt.plot(fcst_df_h14[[target_column_name, \"predicted\"]])\n",
+    "plt.xticks(rotation=45)\n",
+    "plt.title(f\"Predicted vs. Actuals\")\n",
+    "plt.legend([\"actual\", \"14-day-ahead forecast\"])\n",
+    "plt.show()"
   ]
  },
 {
  "cell_type": "markdown",
  "metadata": {},
  "source": [
-    "To drill down more, we can look at the distributions of APE (absolute percentage error) by horizon. From the chart, it is clear that the overall MAPE is being skewed by one particular point where the actual value is of small absolute value."
+    "Looking at the plot, there are two clear issues:\n",
+    "1. An anomalously low count value on October 29th, 2012.\n",
+    "2. End-of-year holidays (Thanksgiving and Christmas) in late November and late December.\n",
+    "\n",
+    "What happened on Oct. 29th, 2012? That day, Hurricane Sandy brought severe storm surge flooding to the east coast of the United States, particularly around New York City. This is certainly an anomalous event that the model did not account for!\n",
+    "\n",
+    "As for the late year holidays, the model apparently did not learn to account for the full reduction of bike share rentals on these major holidays. The training data covers 2011 and early 2012, so the model fit only had access to a single occurrence of these holidays. This makes it challenging to resolve holiday effects; however, a larger AutoML model search may result in a better model that is more holiday-aware.\n",
+    "\n",
+    "If we filter the predictions prior to the Thanksgiving holiday and remove the anomalous day of 2012-10-29, the metrics are closer to validation levels:"
   ]
  },
 {
@@ -656,20 +686,16 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "df_all_APE = df_all.assign(APE=APE(df_all[target_column_name], df_all[\"predicted\"]))\n",
-    "APEs = [\n",
-    "    df_all_APE[df_all[\"horizon_origin\"] == h].APE.values\n",
-    "    for h in range(1, forecast_horizon + 1)\n",
-    "]\n",
-    "\n",
-    "%matplotlib inline\n",
-    "plt.boxplot(APEs)\n",
-    "plt.yscale(\"log\")\n",
-    "plt.xlabel(\"horizon\")\n",
-    "plt.ylabel(\"APE (%)\")\n",
-    "plt.title(\"Absolute Percentage Errors by Forecast Horizon\")\n",
+    "date_filter = (fcst_df.date != \"2012-10-29\") & (fcst_df.date < \"2012-11-22\")\n",
+    "scores = scoring.score_regression(\n",
+    "    y_test=fcst_df[date_filter][target_column_name],\n",
+    "    y_pred=fcst_df[date_filter][\"predicted\"],\n",
+    "    metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
+    ")\n",
    "\n",
-    "plt.show()"
+    "print(\"[Test data scores (filtered)]\\n\")\n",
+    "for key, value in scores.items():\n",
+    "    print(\"{}: {:.3f}\".format(key, value))"
   ]
  }
 ],
@@ -711,7 +737,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.8.5"
+   "version": "3.7.13"
  },
 "mimetype": "text/x-python",
 "name": "python",

how-to-use-azureml/automated-machine-learning/forecasting-bike-share/forecasting_script.py

Lines changed: 6 additions & 6 deletions
@@ -36,18 +36,18 @@

 fitted_model = joblib.load("model.pkl")

-y_pred, X_trans = fitted_model.rolling_evaluation(X_test_df, y_test_df.values)
+X_rf = fitted_model.rolling_forecast(X_test_df, y_test_df.values, step=1)

 # Add predictions, actuals, and horizon relative to rolling origin to the test feature data
 assign_dict = {
-    "horizon_origin": X_trans["horizon_origin"].values,
-    "predicted": y_pred,
-    target_column_name: y_test_df[target_column_name].values,
+    fitted_model.forecast_origin_column_name: "forecast_origin",
+    fitted_model.forecast_column_name: "predicted",
+    fitted_model.actual_column_name: target_column_name,
 }
-df_all = X_test_df.assign(**assign_dict)
+X_rf.rename(columns=assign_dict, inplace=True)

 file_name = "outputs/predictions.csv"
-export_csv = df_all.to_csv(file_name, header=True)
+export_csv = X_rf.to_csv(file_name, header=True)

 # Upload the predictions into artifacts
 run.upload_file(name=file_name, path_or_stream=file_name)
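The assign_dict rename maps the raw rolling_forecast columns onto the names the notebook consumes. Per the forecast-function notebook below, the raw names look like _automl_forecast_origin, _automl_forecast_y, and _automl_actual_y, with "y" generally replaced by the actual target column name; the stub frame in this sketch assumes those defaults purely for illustration.

import pandas as pd

# Stub for rolling_forecast output; in the real script the keys come from the
# fitted model's forecast_origin_column_name / forecast_column_name /
# actual_column_name attributes rather than hard-coded strings.
X_rf = pd.DataFrame(
    {
        "date": ["2012-09-05", "2012-09-06"],
        "_automl_forecast_origin": ["2012-09-04", "2012-09-04"],
        "_automl_forecast_y": [4200.0, 4100.0],
        "_automl_actual_y": [4280.0, 4120.0],
    }
)

target_column_name = "cnt"  # the bike-share target
assign_dict = {
    "_automl_forecast_origin": "forecast_origin",
    "_automl_forecast_y": "predicted",
    "_automl_actual_y": target_column_name,
}
X_rf.rename(columns=assign_dict, inplace=True)
print(X_rf.columns.tolist())  # ['date', 'forecast_origin', 'predicted', 'cnt']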

how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb

Lines changed: 40 additions & 3 deletions
@@ -758,7 +758,15 @@
   "metadata": {},
   "source": [
    "## Forecasting farther than the forecast horizon <a id=\"recursive forecasting\"></a>\n",
-    "When the forecast destination, or the latest date in the prediction data frame, is farther into the future than the specified forecast horizon, the `forecast()` function will still make point predictions out to the later date using a recursive operation mode. Internally, the method recursively applies the regular forecaster to generate context so that we can forecast further into the future. \n",
+    "When the forecast destination, or the latest date in the prediction data frame, is farther into the future than the specified forecast horizon, the forecaster must be iteratively applied. Here, we advance the forecast origin on each iteration over the prediction window, predicting `max_horizon` periods ahead on each iteration. There are two choices for the context data to use as the forecaster advances into the prediction window:\n",
+    "\n",
+    "1. We can use forecasted values from previous iterations (recursive forecast),\n",
+    "2. We can use known, actual values of the target if they are available (rolling forecast).\n",
+    "\n",
+    "The first method is useful in a true forecasting scenario when we do not yet know the actual target values while the second is useful in an evaluation scenario where we want to compute accuracy metrics for the `max_horizon`-period-ahead forecaster over a long test set. We refer to the first as a **recursive forecast** since we apply the forecaster recursively over the prediction window and the second as a **rolling forecast** since we roll forward over known actuals.\n",
+    "\n",
+    "### Recursive forecasting\n",
+    "By default, the `forecast()` function will make point predictions out to the later date using a recursive operation mode. Internally, the method recursively applies the regular forecaster to generate context so that we can forecast further into the future. \n",
    "\n",
    "To illustrate the use-case and operation of recursive forecasting, we'll consider an example with a single time-series where the forecasting period directly follows the training period and is twice as long as the forecasting horizon given at training time.\n",
    "\n",
@@ -818,6 +826,35 @@
    "np.array_equal(y_pred_all, y_pred_long)"
   ]
  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Rolling forecasts\n",
+    "A rolling forecast is a similar concept to the recursive forecasts described above except that we use known actual values of the target for our context data. We have provided a different, public method for this called `rolling_forecast`. In addition to test data and actuals (`X_test` and `y_test`), `rolling_forecast` also accepts an optional `step` parameter that controls how far the origin advances on each iteration. The recursive forecast mode uses a fixed step of `max_horizon` while `rolling_forecast` defaults to a step size of 1, but can be set to any integer from 1 to `max_horizon`, inclusive.\n",
+    "\n",
+    "Let's see what the rolling forecast looks like on the long test set with the step set to 1:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "X_rf = fitted_model.rolling_forecast(X_test_long, y_test_long, step=1)\n",
+    "X_rf.head(n=12)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Notice that `rolling_forecast` has returned a single DataFrame containing all results and has generated some new columns: `_automl_forecast_origin`, `_automl_forecast_y`, and `_automl_actual_y`. These are the origin date for each forecast, the forecasted value and the actual value, respectively. Note that \"y\" in the forecast and actual column names will generally be replaced by the target column name supplied to AutoML.\n",
+    "\n",
+    "The output above shows forecasts for two prediction windows, the first with origin at the end of the training set and the second including the first observation in the test set (2000-01-01 06:00:00). Since the forecast windows overlap, there are multiple forecasts for most dates which are associated with different origin dates."
+   ]
+  },
  {
   "cell_type": "markdown",
   "metadata": {},
@@ -880,7 +917,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.8.5"
+   "version": "3.7.13"
  },
  "tags": [
   "Forecasting",
@@ -894,5 +931,5 @@
  }
 },
 "nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
 }
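To make the recursive-versus-rolling distinction concrete, here is a toy stepping loop with a naive last-value forecaster. This is not the AutoML model; it only illustrates where the context values come from as the forecast origin advances over the test window.

import numpy as np

max_horizon = 3
y_train_last = 10.0
y_test = np.array([12.0, 11.0, 13.0, 14.0, 12.0, 15.0])  # known actuals

# Recursive mode: the origin advances by max_horizon and the context is the
# previous window's *forecasts*, so no test actuals are ever consumed.
context = y_train_last
recursive = []
for origin in range(0, len(y_test), max_horizon):
    window = [context] * min(max_horizon, len(y_test) - origin)
    recursive.extend(window)
    context = window[-1]  # feed forecasts back in as context

# Rolling mode with step=1: the origin advances one period at a time and the
# context is the known *actual* at each origin. For brevity we show only the
# 1-step-ahead forecast from each origin.
rolling = [y_train_last] + list(y_test[:-1])

print("recursive:", recursive)  # all 10.0 - forecasts feed on forecasts
print("rolling:  ", rolling)    # tracks the actuals with a one-step lag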
