diff --git a/README.md b/README.md
index 33d704d10..bc701a4b2 100644
--- a/README.md
+++ b/README.md
@@ -40,6 +40,7 @@ The [How to use Azure ML](./how-to-use-azureml) folder contains specific example
- [Deployment](./how-to-use-azureml/deployment) - Examples showing how to deploy and manage machine learning models and solutions
- [Azure Databricks](./how-to-use-azureml/azure-databricks) - Examples showing how to use Azure ML with Azure Databricks
- [Monitor Models](./how-to-use-azureml/monitor-models) - Examples showing how to enable model monitoring services such as DataDrift
+- [Reinforcement Learning](./how-to-use-azureml/reinforcement-learning) - Examples showing how to train reinforcement learning agents
---
## Documentation
diff --git a/configuration.ipynb b/configuration.ipynb
index 9bc1c3b42..eae88e17e 100644
--- a/configuration.ipynb
+++ b/configuration.ipynb
@@ -103,7 +103,7 @@
"source": [
"import azureml.core\n",
"\n",
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
diff --git a/how-to-use-azureml/automated-machine-learning/README.md b/how-to-use-azureml/automated-machine-learning/README.md
index ec88126b4..05c61f273 100644
--- a/how-to-use-azureml/automated-machine-learning/README.md
+++ b/how-to-use-azureml/automated-machine-learning/README.md
@@ -144,7 +144,7 @@ jupyter notebook
- Dataset: forecasting for a bike-sharing
- Example of training an automated ML forecasting model on multiple time-series
-- [auto-ml-forecasting-function.ipynb](forecasting-high-frequency/auto-ml-forecasting-function.ipynb)
+- [auto-ml-forecasting-function.ipynb](forecasting-forecast-function/auto-ml-forecasting-function.ipynb)
- Example of training an automated ML forecasting model on multiple time-series
- [auto-ml-forecasting-beer-remote.ipynb](forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb)
diff --git a/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb b/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
index 324cbffa5..84ffd9ccb 100644
--- a/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
+++ b/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb
@@ -105,7 +105,7 @@
"metadata": {},
"outputs": [],
"source": [
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
diff --git a/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb b/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb
index 96cb9a458..c42ecf2e3 100644
--- a/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb
+++ b/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb
@@ -93,7 +93,7 @@
"metadata": {},
"outputs": [],
"source": [
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
diff --git a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb
index 13dd8cada..a1debd3b3 100644
--- a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb
+++ b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb
@@ -97,7 +97,7 @@
"metadata": {},
"outputs": [],
"source": [
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
diff --git a/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb b/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb
index 222d3e88b..d70635687 100644
--- a/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb
+++ b/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb
@@ -88,7 +88,7 @@
"metadata": {},
"outputs": [],
"source": [
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb
index 7848165fe..32ca68686 100644
--- a/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb
+++ b/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb
@@ -114,7 +114,7 @@
"metadata": {},
"outputs": [],
"source": [
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb
index e1fb4412b..bbfe43ac8 100644
--- a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb
+++ b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb
@@ -87,7 +87,7 @@
"metadata": {},
"outputs": [],
"source": [
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -510,16 +510,16 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.automl.core.shared import constants, metrics\n",
+ "from azureml.automl.core.shared import constants\n",
+ "from azureml.automl.runtime.shared.score import scoring\n",
"from sklearn.metrics import mean_absolute_error, mean_squared_error\n",
"from matplotlib import pyplot as plt\n",
"\n",
"# use automl metrics module\n",
- "scores = metrics.compute_metrics_regression(\n",
- " df_all['predicted'],\n",
- " df_all[target_column_name],\n",
- " list(constants.Metric.SCALAR_REGRESSION_SET),\n",
- " None, None, None)\n",
+ "scores = scoring.score_regression(\n",
+ " y_test=df_all[target_column_name],\n",
+ " y_pred=df_all['predicted'],\n",
+ " metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",
diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb
index f439d3486..4b31c0eb7 100644
--- a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb
+++ b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb
@@ -97,7 +97,7 @@
"metadata": {},
"outputs": [],
"source": [
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -465,7 +465,7 @@
"metadata": {},
"source": [
"### Forecast Function\n",
- "For forecasting, we will use the forecast function instead of the predict function. Using the predict method would result in getting predictions for EVERY horizon the forecaster can predict at. This is useful when training and evaluating the performance of the forecaster at various horizons, but the level of detail is excessive for normal use. Forecast function also can handle more complicated scenarios, see notebook on [high frequency forecasting](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb)."
+ "For forecasting, we will use the forecast function instead of the predict function. Using the predict method would result in getting predictions for EVERY horizon the forecaster can predict at. This is useful when training and evaluating the performance of the forecaster at various horizons, but the level of detail is excessive for normal use. Forecast function also can handle more complicated scenarios, see the [forecast function notebook](../forecasting-forecast-function/auto-ml-forecasting-function.ipynb)."
]
},
{
@@ -507,15 +507,15 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.automl.core.shared import constants, metrics\n",
+ "from azureml.automl.core.shared import constants\n",
+ "from azureml.automl.runtime.shared.score import scoring\n",
"from matplotlib import pyplot as plt\n",
"\n",
"# use automl metrics module\n",
- "scores = metrics.compute_metrics_regression(\n",
- " df_all['predicted'],\n",
- " df_all[target_column_name],\n",
- " list(constants.Metric.SCALAR_REGRESSION_SET),\n",
- " None, None, None)\n",
+ "scores = scoring.score_regression(\n",
+ " y_test=df_all[target_column_name],\n",
+ " y_pred=df_all['predicted'],\n",
+ " metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",
@@ -667,15 +667,15 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.automl.core.shared import constants, metrics\n",
+ "from azureml.automl.core.shared import constants\n",
+ "from azureml.automl.runtime.shared.score import scoring\n",
"from matplotlib import pyplot as plt\n",
"\n",
"# use automl metrics module\n",
- "scores = metrics.compute_metrics_regression(\n",
- " df_all['predicted'],\n",
- " df_all[target_column_name],\n",
- " list(constants.Metric.SCALAR_REGRESSION_SET),\n",
- " None, None, None)\n",
+ "scores = scoring.score_regression(\n",
+ " y_test=df_all[target_column_name],\n",
+ " y_pred=df_all['predicted'],\n",
+ " metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",
diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb
similarity index 87%
rename from how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb
rename to how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb
index 1c0e43fff..88fcc3e75 100644
--- a/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb
+++ b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb
@@ -35,7 +35,6 @@
"Terminology:\n",
"* forecast origin: the last period when the target value is known\n",
"* forecast periods(s): the period(s) for which the value of the target is desired.\n",
- "* forecast horizon: the number of forecast periods\n",
"* lookback: how many past periods (before forecast origin) the model function depends on. The larger of number of lags and length of rolling window.\n",
"* prediction context: `lookback` periods immediately preceding the forecast origin\n",
"\n",
@@ -95,7 +94,7 @@
"metadata": {},
"outputs": [],
"source": [
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -720,6 +719,90 @@
"X_show[['date', 'grain', 'ext_predictor', '_automl_target_col']]\n",
"# prediction is in _automl_target_col"
]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Forecasting farther than the maximum horizon \n",
+ "When the forecast destination, or the latest date in the prediction data frame, is farther into the future than the specified maximum horizon, the `forecast()` function will still make point predictions out to the later date using a recursive operation mode. Internally, the method recursively applies the regular forecaster to generate context so that we can forecast further into the future. \n",
+ "\n",
+ "To illustrate the use-case and operation of recursive forecasting, we'll consider an example with a single time-series where the forecasting period directly follows the training period and is twice as long as the maximum horizon given at training time.\n",
+ "\n",
+ "\n",
+ "\n",
+    "Internally, we apply the forecaster in an iterative manner and finish the forecast task in two iterations. In the first iteration, we apply the forecaster and get the prediction for the first max-horizon periods (y_pred1). In the second iteration, y_pred1 is used as the context to produce the prediction for the next max-horizon periods (y_pred2). The combination of (y_pred1 and y_pred2) gives the results for the total forecast periods. \n",
+ "\n",
+ "A caveat: forecast accuracy will likely be worse the farther we predict into the future since errors are compounded with recursive application of the forecaster.\n",
+ "\n",
+ "\n",
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# generate the same kind of test data we trained on, but with a single grain/time-series and test period twice as long as the max_horizon\n",
+ "_, _, X_test_long, y_test_long = get_timeseries(train_len=n_train_periods,\n",
+ " test_len=max_horizon*2,\n",
+ " time_column_name=TIME_COLUMN_NAME,\n",
+ " target_column_name=TARGET_COLUMN_NAME,\n",
+ " grain_column_name=GRAIN_COLUMN_NAME,\n",
+ " grains=1)\n",
+ "\n",
+ "print(X_test_long.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].min())\n",
+ "print(X_test_long.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].max())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# forecast() function will invoke the recursive forecast method internally.\n",
+ "y_pred_long, X_trans_long = fitted_model.forecast(X_test_long)\n",
+ "y_pred_long"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# What forecast() function does in this case is equivalent to iterating it twice over the test set as the following. \n",
+ "y_pred1, _ = fitted_model.forecast(X_test_long[:max_horizon])\n",
+ "y_pred_all, _ = fitted_model.forecast(X_test_long, np.concatenate((y_pred1, np.full(max_horizon, np.nan))))\n",
+ "np.array_equal(y_pred_all, y_pred_long)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Confidence interval and distributional forecasts\n",
+ "AutoML cannot currently estimate forecast errors beyond the maximum horizon set during training, so the `forecast_quantiles()` function will return missing values for quantiles not equal to 0.5 beyond the maximum horizon. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fitted_model.forecast_quantiles(X_test_long)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "Similar to the simple scenarios illustrated above, forecasting farther than the max horizon in other scenarios, such as 'multiple grain', 'destination-date forecast', and 'forecast away from the training data', is also automatically handled by the `forecast()` function. "
+ ]
}
],
"metadata": {
diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.yml b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.yml
similarity index 100%
rename from how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.yml
rename to how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.yml
diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/forecast_function_at_train.png b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/forecast_function_at_train.png
similarity index 100%
rename from how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/forecast_function_at_train.png
rename to how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/forecast_function_at_train.png
diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/forecast_function_away_from_train.png b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/forecast_function_away_from_train.png
similarity index 100%
rename from how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/forecast_function_away_from_train.png
rename to how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/forecast_function_away_from_train.png
diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_iter1.png b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_iter1.png
new file mode 100644
index 000000000..2962f5644
Binary files /dev/null and b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_iter1.png differ
diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_iter2.png b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_iter2.png
new file mode 100644
index 000000000..180f59d4f
Binary files /dev/null and b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_iter2.png differ
diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_overview_small.png b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_overview_small.png
new file mode 100644
index 000000000..0f3e687e2
Binary files /dev/null and b/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/recursive_forecast_overview_small.png differ
diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
index f4f73dd03..4b592a958 100644
--- a/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
+++ b/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb
@@ -82,7 +82,7 @@
"metadata": {},
"outputs": [],
"source": [
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
@@ -545,7 +545,7 @@
"source": [
"If you are used to scikit pipelines, perhaps you expected `predict(X_test)`. However, forecasting requires a more general interface that also supplies the past target `y` values. Please use `forecast(X,y)` as `predict(X)` is reserved for internal purposes on forecasting models.\n",
"\n",
- "The [forecast function notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb) demonstrates the use of the forecast function for a variety of use cases. Also, please see the [API documentation for the forecast function](https://docs.microsoft.com/en-us/python/api/azureml-automl-runtime/azureml.automl.runtime.shared.model_wrappers.forecastingpipelinewrapper?view=azure-ml-py#forecast-x-pred--typing-union-pandas-core-frame-dataframe--nonetype----none--y-pred--typing-union-pandas-core-frame-dataframe--numpy-ndarray--nonetype----none--forecast-destination--typing-union-pandas--libs-tslibs-timestamps-timestamp--nonetype----none--ignore-data-errors--bool---false-----typing-tuple-numpy-ndarray--pandas-core-frame-dataframe-)."
+    "The [forecast function notebook](../forecasting-forecast-function/auto-ml-forecasting-function.ipynb) demonstrates the use of the forecast function for a variety of use cases."
]
},
{
@@ -576,15 +576,15 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.automl.core.shared import constants, metrics\n",
+ "from azureml.automl.core.shared import constants\n",
+ "from azureml.automl.runtime.shared.score import scoring\n",
"from matplotlib import pyplot as plt\n",
"\n",
- "# use automl metrics module\n",
- "scores = metrics.compute_metrics_regression(\n",
- " df_all['predicted'],\n",
- " df_all[target_column_name],\n",
- " list(constants.Metric.SCALAR_REGRESSION_SET),\n",
- " None, None, None)\n",
+ "# use automl scoring module\n",
+ "scores = scoring.score_regression(\n",
+ " y_test=df_all[target_column_name],\n",
+ " y_pred=df_all['predicted'],\n",
+ " metrics=list(constants.Metric.SCALAR_REGRESSION_SET))\n",
"\n",
"print(\"[Test data scores]\\n\")\n",
"for key, value in scores.items(): \n",
diff --git a/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb b/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb
index 9750a559e..beb841fb0 100644
--- a/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb
+++ b/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb
@@ -95,7 +95,7 @@
"metadata": {},
"outputs": [],
"source": [
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
diff --git a/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb b/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb
index 099b7539b..932b35ceb 100644
--- a/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb
+++ b/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb
@@ -98,7 +98,7 @@
"metadata": {},
"outputs": [],
"source": [
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
diff --git a/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/score_explain.py b/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/score_explain.py
index 433af5f43..a3f5c1353 100644
--- a/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/score_explain.py
+++ b/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/score_explain.py
@@ -7,7 +7,7 @@
import azureml.explain.model
from azureml.train.automl.runtime.automl_explain_utilities import AutoMLExplainerSetupClass, \
automl_setup_model_explanations
-from sklearn.externals import joblib
+import joblib
from azureml.core.model import Model
diff --git a/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb b/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb
index a8992a596..ab06d74fe 100644
--- a/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb
+++ b/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb
@@ -92,7 +92,7 @@
"metadata": {},
"outputs": [],
"source": [
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
diff --git a/how-to-use-azureml/deployment/deploy-to-local/myenv.yml b/how-to-use-azureml/deployment/deploy-to-local/myenv.yml
index 36ee6703a..095ca4c7b 100644
--- a/how-to-use-azureml/deployment/deploy-to-local/myenv.yml
+++ b/how-to-use-azureml/deployment/deploy-to-local/myenv.yml
@@ -3,6 +3,6 @@ dependencies:
- python=3.6.2
- pip:
- azureml-defaults
- - scikit-learn
+ - scikit-learn==0.19.1
- numpy
- inference-schema[numpy-support]
diff --git a/how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local.ipynb b/how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local.ipynb
index fd6c888fb..69a72c4b8 100644
--- a/how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local.ipynb
+++ b/how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local.ipynb
@@ -233,7 +233,8 @@
" 'inference-schema[numpy-support]',\n",
" 'joblib',\n",
" 'numpy',\n",
- " 'scikit-learn'\n",
+ " 'scikit-learn==0.19.1',\n",
+ " 'scipy'\n",
"])\n",
"inference_config = InferenceConfig(entry_script='score.py', environment=environment)\n",
"# if cpu and memory_in_gb parameters are not provided\n",
diff --git a/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb b/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb
index 91479a4cb..1dc2d3e79 100644
--- a/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb
+++ b/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb
@@ -5,7 +5,7 @@
"metadata": {},
"source": [
"# Enabling App Insights for Services in Production\n",
- "With this notebook, you can learn how to enable App Insights for standard service monitoring, plus, we provide examples for doing custom logging within a scoring files in a model. \n",
+    "With this notebook, you can learn how to enable App Insights for standard service monitoring, plus, we provide examples for doing custom logging within the scoring file of a model.\n",
"\n",
"\n",
"## What does Application Insights monitor?\n",
@@ -45,11 +45,13 @@
"metadata": {},
"outputs": [],
"source": [
+ "import azureml.core\n",
+ "import json\n",
+ "\n",
"from azureml.core import Workspace\n",
"from azureml.core.compute import AksCompute, ComputeTarget\n",
"from azureml.core.webservice import AksWebservice\n",
- "import azureml.core\n",
- "import json\n",
+ "\n",
"print(azureml.core.VERSION)"
]
},
@@ -67,7 +69,7 @@
"outputs": [],
"source": [
"ws = Workspace.from_config()\n",
- "print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
+ "print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\\n')"
]
},
{
@@ -84,13 +86,13 @@
"metadata": {},
"outputs": [],
"source": [
- "#Register the model\n",
- "from azureml.core.model import Model\n",
- "model = Model.register(model_path = \"sklearn_regression_model.pkl\", # this points to a local file\n",
- " model_name = \"sklearn_regression_model.pkl\", # this is the name the model is registered as\n",
- " tags = {'area': \"diabetes\", 'type': \"regression\"},\n",
- " description = \"Ridge regression model to predict diabetes\",\n",
- " workspace = ws)\n",
+ "from azureml.core import Model\n",
+ "\n",
+ "model = Model.register(model_path=\"sklearn_regression_model.pkl\", # This points to a local file.\n",
+ " model_name=\"sklearn_regression_model.pkl\", # This is the name the model is registered as.\n",
+ " tags={'area': \"diabetes\", 'type': \"regression\"},\n",
+ " description=\"Ridge regression model to predict diabetes\",\n",
+ " workspace=ws)\n",
"\n",
"print(model.name, model.description, model.version)"
]
@@ -120,7 +122,7 @@
"import os\n",
"import pickle\n",
"import json\n",
- "import numpy \n",
+ "import numpy\n",
"from sklearn.externals import joblib\n",
"from sklearn.linear_model import Ridge\n",
"import time\n",
@@ -129,15 +131,15 @@
" global model\n",
" #Print statement for appinsights custom traces:\n",
" print (\"model initialized\" + time.strftime(\"%H:%M:%S\"))\n",
- " \n",
+ "\n",
" # AZUREML_MODEL_DIR is an environment variable created during deployment.\n",
" # It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)\n",
" # For multiple models, it points to the folder containing all deployed models (./azureml-models)\n",
" model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_regression_model.pkl')\n",
- " \n",
+ "\n",
" # deserialize the model file back into a sklearn model\n",
" model = joblib.load(model_path)\n",
- " \n",
+ "\n",
"\n",
"# note you can pass in multiple rows for scoring\n",
"def run(raw_data):\n",
@@ -168,7 +170,7 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.core.conda_dependencies import CondaDependencies \n",
+ "from azureml.core.conda_dependencies import CondaDependencies\n",
"\n",
"myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'],\n",
" pip_packages=['azureml-defaults'])\n",
@@ -190,9 +192,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.core.model import InferenceConfig\n",
"from azureml.core.environment import Environment\n",
- "\n",
+ "from azureml.core.model import InferenceConfig\n",
"\n",
"myenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\n",
"inference_config = InferenceConfig(entry_script=\"score.py\", environment=myenv)"
@@ -213,11 +214,11 @@
"source": [
"from azureml.core.webservice import AciWebservice\n",
"\n",
- "aci_deployment_config = AciWebservice.deploy_configuration(cpu_cores = 1, \n",
- " memory_gb = 1, \n",
- " tags = {'area': \"diabetes\", 'type': \"regression\"}, \n",
- " description = 'Predict diabetes using regression model',\n",
- " enable_app_insights = True)"
+ "aci_deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,\n",
+ " memory_gb=1,\n",
+ " tags={'area': \"diabetes\", 'type': \"regression\"},\n",
+ " description=\"Predict diabetes using regression model\",\n",
+ " enable_app_insights=True)"
]
},
{
@@ -226,27 +227,12 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.core.webservice import Webservice\n",
+ "aci_service_name = \"aci-service-appinsights\"\n",
"\n",
- "aci_service_name = 'my-aci-service-4'\n",
- "aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aci_deployment_config)\n",
- "aci_service.wait_for_deployment(True)\n",
- "print(aci_service.state)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "%%time\n",
+ "aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aci_deployment_config, overwrite=True)\n",
+ "aci_service.wait_for_deployment(show_output=True)\n",
"\n",
- "test_sample = json.dumps({'data': [\n",
- " [1,28,13,45,54,6,57,8,8,10], \n",
- " [101,9,8,37,6,45,4,3,2,41]\n",
- "]})\n",
- "test_sample = bytes(test_sample,encoding='utf8')"
+ "print(aci_service.state)"
]
},
{
@@ -256,7 +242,15 @@
"outputs": [],
"source": [
"if aci_service.state == \"Healthy\":\n",
- " prediction = aci_service.run(input_data=test_sample)\n",
+ " test_sample = json.dumps({\n",
+ " \"data\": [\n",
+ " [1,28,13,45,54,6,57,8,8,10],\n",
+ " [101,9,8,37,6,45,4,3,2,41]\n",
+ " ]\n",
+ " })\n",
+ "\n",
+ " prediction = aci_service.run(test_sample)\n",
+ "\n",
" print(prediction)\n",
"else:\n",
" raise ValueError(\"Service deployment isn't healthy, can't call the service. Error: \", aci_service.error)"
@@ -282,14 +276,21 @@
"metadata": {},
"outputs": [],
"source": [
- "# Use the default configuration (can also provide parameters to customize)\n",
- "prov_config = AksCompute.provisioning_configuration()\n",
+ "from azureml.exceptions import ComputeTargetException\n",
+ "\n",
+ "aks_name = \"my-aks\"\n",
+ "\n",
+ "try:\n",
+ " aks_target = ComputeTarget(ws, aks_name)\n",
+ " print(\"Using existing AKS cluster {}.\".format(aks_name))\n",
+ "except ComputeTargetException:\n",
+ " print(\"Creating a new AKS cluster {}.\".format(aks_name))\n",
"\n",
- "aks_name = 'my-aks-test3' \n",
- "# Create the cluster\n",
- "aks_target = ComputeTarget.create(workspace = ws, \n",
- " name = aks_name, \n",
- " provisioning_configuration = prov_config)"
+ " # Use the default configuration (can also provide parameters to customize).\n",
+ " prov_config = AksCompute.provisioning_configuration()\n",
+ " aks_target = ComputeTarget.create(workspace=ws,\n",
+ " name=aks_name,\n",
+ " provisioning_configuration=prov_config)"
]
},
{
@@ -299,7 +300,8 @@
"outputs": [],
"source": [
"%%time\n",
- "aks_target.wait_for_completion(show_output = True)"
+ "if aks_target.provisioning_state != \"Succeeded\":\n",
+ " aks_target.wait_for_completion(show_output=True)"
]
},
{
@@ -323,13 +325,13 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "```python \n",
+ "```python\n",
"%%time\n",
"resource_id = '/subscriptions//resourcegroups//providers/Microsoft.ContainerService/managedClusters/'\n",
"create_name= 'myaks4'\n",
"attach_config = AksCompute.attach_configuration(resource_id=resource_id)\n",
- "aks_target = ComputeTarget.attach(workspace = ws, \n",
- " name = create_name, \n",
+ "aks_target = ComputeTarget.attach(workspace=ws,\n",
+ " name=create_name,\n",
" attach_configuration=attach_config)\n",
"## Wait for the operation to complete\n",
"aks_target.wait_for_provisioning(True)```"
@@ -349,7 +351,7 @@
"metadata": {},
"outputs": [],
"source": [
- "#Set the web service configuration\n",
+ "# Set the web service configuration.\n",
"aks_deployment_config = AksWebservice.deploy_configuration(enable_app_insights=True)"
]
},
@@ -366,15 +368,16 @@
"metadata": {},
"outputs": [],
"source": [
- "if aks_target.provisioning_state== \"Succeeded\": \n",
- " aks_service_name ='aks-w-dc5'\n",
+ "if aks_target.provisioning_state == \"Succeeded\":\n",
+ " aks_service_name = \"aks-service-appinsights\"\n",
" aks_service = Model.deploy(ws,\n",
- " aks_service_name, \n",
- " [model], \n",
- " inference_config, \n",
- " aks_deployment_config, \n",
- " deployment_target = aks_target) \n",
- " aks_service.wait_for_deployment(show_output = True)\n",
+ " aks_service_name,\n",
+ " [model],\n",
+ " inference_config,\n",
+ " aks_deployment_config,\n",
+ " deployment_target=aks_target,\n",
+ " overwrite=True)\n",
+ " aks_service.wait_for_deployment(show_output=True)\n",
" print(aks_service.state)\n",
"else:\n",
" raise ValueError(\"AKS provisioning failed. Error: \", aks_service.error)"
@@ -395,13 +398,14 @@
"source": [
"%%time\n",
"\n",
- "test_sample = json.dumps({'data': [\n",
- " [1,28,13,45,54,6,57,8,8,10], \n",
- " [101,9,8,37,6,45,4,3,2,41]\n",
- "]})\n",
- "test_sample = bytes(test_sample,encoding='utf8')\n",
- "\n",
"if aks_service.state == \"Healthy\":\n",
+ " test_sample = json.dumps({\n",
+ " \"data\": [\n",
+ " [1,28,13,45,54,6,57,8,8,10],\n",
+ " [101,9,8,37,6,45,4,3,2,41]\n",
+ " ]\n",
+ " })\n",
+ "\n",
" prediction = aks_service.run(input_data=test_sample)\n",
" print(prediction)\n",
"else:\n",
@@ -435,7 +439,7 @@
"outputs": [],
"source": [
"aks_service.update(enable_app_insights=False)\n",
- "aks_service.wait_for_deployment(show_output = True)"
+ "aks_service.wait_for_deployment(show_output=True)"
]
},
{
diff --git a/how-to-use-azureml/deployment/onnx/onnx-convert-aml-deploy-tinyyolo.ipynb b/how-to-use-azureml/deployment/onnx/onnx-convert-aml-deploy-tinyyolo.ipynb
index 9d5b89be0..bcf507019 100644
--- a/how-to-use-azureml/deployment/onnx/onnx-convert-aml-deploy-tinyyolo.ipynb
+++ b/how-to-use-azureml/deployment/onnx/onnx-convert-aml-deploy-tinyyolo.ipynb
@@ -115,6 +115,11 @@
"# Convert from CoreML into ONNX\n",
"onnx_model = onnxmltools.convert_coreml(coreml_model, 'TinyYOLOv2')\n",
"\n",
+ "# Fix the preprocessor bias in the ImageScaler\n",
+ "for init in onnx_model.graph.initializer:\n",
+ " if init.name == 'scalerPreprocessor_bias':\n",
+ " init.dims[1] = 1\n",
+ "\n",
"# Save ONNX model\n",
"onnxmltools.utils.save_model(onnx_model, 'tinyyolov2.onnx')\n",
"\n",
@@ -255,7 +260,7 @@
"source": [
"from azureml.core.conda_dependencies import CondaDependencies \n",
"\n",
- "myenv = CondaDependencies.create(pip_packages=[\"numpy\", \"onnxruntime==0.4.0\", \"azureml-core\", \"azureml-defaults\"])\n",
+ "myenv = CondaDependencies.create(pip_packages=[\"numpy\", \"onnxruntime\", \"azureml-core\", \"azureml-defaults\"])\n",
"\n",
"with open(\"myenv.yml\",\"w\") as f:\n",
" f.write(myenv.serialize_to_string())"
@@ -316,7 +321,7 @@
"metadata": {},
"outputs": [],
"source": [
- "aci_service_name = 'my-aci-service-15ad'\n",
+ "aci_service_name = 'my-aci-service-tiny-yolo'\n",
"print(\"Service\", aci_service_name)\n",
"aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)\n",
"aci_service.wait_for_deployment(True)\n",
diff --git a/how-to-use-azureml/deployment/onnx/onnx-convert-aml-deploy-tinyyolo.yml b/how-to-use-azureml/deployment/onnx/onnx-convert-aml-deploy-tinyyolo.yml
index 7ea284e5a..e8c7ffb90 100644
--- a/how-to-use-azureml/deployment/onnx/onnx-convert-aml-deploy-tinyyolo.yml
+++ b/how-to-use-azureml/deployment/onnx/onnx-convert-aml-deploy-tinyyolo.yml
@@ -4,4 +4,5 @@ dependencies:
- azureml-sdk
- numpy
- git+https://github.com/apple/coremltools@v2.1
+ - onnx<1.7.0
- onnxmltools
diff --git a/how-to-use-azureml/deployment/onnx/onnx-inference-facial-expression-recognition-deploy.yml b/how-to-use-azureml/deployment/onnx/onnx-inference-facial-expression-recognition-deploy.yml
index 8d9a9c4ba..59a17443e 100644
--- a/how-to-use-azureml/deployment/onnx/onnx-inference-facial-expression-recognition-deploy.yml
+++ b/how-to-use-azureml/deployment/onnx/onnx-inference-facial-expression-recognition-deploy.yml
@@ -5,5 +5,5 @@ dependencies:
- azureml-widgets
- matplotlib
- numpy
- - onnx
+ - onnx<1.7.0
- opencv-python-headless
diff --git a/how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.yml b/how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.yml
index 0d73085a8..97f9e8b51 100644
--- a/how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.yml
+++ b/how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.yml
@@ -5,5 +5,5 @@ dependencies:
- azureml-widgets
- matplotlib
- numpy
- - onnx
+ - onnx<1.7.0
- opencv-python-headless
diff --git a/how-to-use-azureml/deployment/tensorflow/tensorflow-flower-predict-input.json b/how-to-use-azureml/deployment/tensorflow/tensorflow-flower-predict-input.json
deleted file mode 100644
index f3c053937..000000000
--- a/how-to-use-azureml/deployment/tensorflow/tensorflow-flower-predict-input.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "instances": [
- {
- "image_bytes": {
- "b64": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAErASsDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwC9A42ir9vA0nOOKxYJhkDqe1bNvO0ZAYdaIsC8LLjOwH60yWDAwY1/75qzDcDAz0qfhl55BqxGE1pCzZwVPt0qJ7MgZQbh7da1Z7bncBVQgoaVhlGFvKlBIwOhqxPFxkdKmdY5xiQYP94daaqtGPKkO5P4X/pU2AoKMMQatWv+tAPXpTJ4ipyBTVYqwYHBFTezA1ivHNRsuRU1tOlymOBIOo9aVoyGNaCIEHanEEEMKXbg07BAx2NICXO5KrvwcVPEcDFRyD5qTYDYhuPuKnA4waitxmQj1FWGX9Ka2ArODzUXU5qxIM81AODzUtjGzHMfvVRcl6mmOMio4V3PSAtwjBUd60l+6DVCMAzH2q6D8v0qo7CIJ3xmsqQ8kmtC5YAVmyctntSbGRkDOT0qWMFyABUWNzD0q5EuxM9zQgJQAqgCkJxS9vemMasA3c8CpFPHNRBgBkinBvSpuBMGxRnPWo1561IOlMBQMEU2R8DFKW2rk1XdsmgCN+TmqskuHIqeUhVNZMkoZyckZqQILTi5UntzWtHMOVbpWQh2zCr6jIBpRGzUjl2jBPHY1chuSODyKx4pOzdKnVyh68VYjbDBlyvSq88G4bhVeG4Kkc8HrV3eGUEVQjLkUr+FRmQgYzV+aMODxzWdIpU0mMerh1wahdCpPvTN21gQamB3jB+qn1rOQDI5GjcMvBFbdvMt1FkfeHWsJhzU1pcG3nDZ4PWlGVgNd4+MigL8uKscMgdeVNRsAORVsRGFwc1G45qfKg/MM/U0jLG3RQPxNS2BCh2OG9DVxwM57GqxRQc8j9asp80I5zjiiIyu64zVdhxVtwMVVak2BUlOTUlumATTXXmpPux0r6AS2vLv7GrLNtFVbM/K596knbgGqT0AqXLZeqbgsRU8x96hJzgCk2A+JPmA61PA4mUSL9wk7fcetULtmEMdvGSJrltgI6hQMsfwH6kVqRIsUaqgAVQAAOwFUgEJ7UwDOc1Ky55/OmtgcCi4EZ6UqqSc0Hk4p46igB44pQaaM5NI7hVx3qkA2V8nHaoAdzE9hTZHOMd6ZczfZoQq/fNDArahcgAxLyf4iKzs0OxJ5696ZUDQP97NaVsdyg+1IPszHlFzU8SRg4jGB6VSQh3linp02mpQm5enNJs9aoBoynfirM
FwVOD0qADjDUn3W9qANIsGGQeKqXCK3PekjlIOCeKfJyN1AGXIMZFNik6xscc5U+hqxMgbPrVFwVas2BezvXOMOPvCo2GD7UyOXOG/iHX3p8hGzdn6Vm0M0rG8ZLYxtzz8pp0lyx/iNZUMpzzVkturURKZGP8AEfzpRMw6Nmq5HvTMspz1pAaUVzzhjiptxjPmRnjuKyBNzzxU8NwUbDcqaXoBreYJU3L+VVn5zTEcRvkHKNUjcE4qZdwITyabK3yGpG4GaqzN+7qG9ALNicwn/eNE75UgU2zb/RQfc0krY4rS+gFZgcc0iKM+9Kc81FcI727QxnDyjbu9AepoWrAZpv8Apt7NqB5jA8mAeig/MfxOPyFa4HFQ20KW8KQxrhEUKB7VYXFWAvlkhSDx1PvUchwSAKlD7Uyep6CoS3UnrU9QGHg0DJ5xSb8mjdjvVAOZ9oqs75JOaJX3Hg1GBmmAKRuLt0Xms24lMshbt2qxezgDyEPuxqkxpNjImo4pGOOarmbk0gJvMINWIbp42BB6VBLC0Z9qjVsGjYDqrWVJ4w6n6j0qcxbh71ztndtbyBlPHcetdLayx3Me+Ns+o7irTuJkDRDvwaYVIODWg0IYc9agkgBGDTEUyCv0pwc4wac8ZTg9KjYFRSAil6ZxVOYZFXGPBBqpKKljIFJB61KzFlqJhTkbPBrO+oxysR0qwrkjk1Wxg1IoPBJ4PpSc7BYkOfU0zzHXvke9WNuFBHQ1A/BrKVRoaQm/dweDShyOOtRZB+tAPHNSq6HYv203/LNuh6VeVvkweq1ioxB+lacUm5Nw7jBrVTUoktEsp+SqNw2F4q1I3yCqM5yQKwchpF62Yi0/GkcHgmkh4gAoOSK1UxEfVuKdbKZH8zseF+nrTGO0qo6scVciXgYrWG1xDxwcUm/GQKc3FVS3J5qmwHmUkknoKYXJGaZuBzTd1JMB4PBzxUUkhPApWcnpTFUk1SAdGrOcVW1S/TTbbIwZW4Rff1rQXbEmSefSqC6bHPdNd3Q86U/dDfdQegFUBkWYurnlI2YseWPetSPSZW5llC+yjJrUVABjt6CnHihJICkulWqcsGf/AHjUwt4QMCGMD/cFSM4HWm+YKq4HO/aAww3NRuqtypxTNpFIMisrjFDFDg1ctbySCQSRsQf5/Wqm4MOaT6GlewHY2OrQXWFciOT0J4NaDRq68/nXBLIRwa0bTVLi2wBIWX0bmqU+4rHSSQcFW5HY1TkiKZBGRUtrq8NyNr8N6VaYJIvysCPar0YjGdMDOeKrSL6ng1pXFuUJxyDVCSMgH0qGMqMvao84NTOp61XYkHmspDRYXBxnpSFjG2DyKbEdwK96eR5ilT95elZSKRYglBHlseD0NJKpU4NUlk2nB6VcjlEq7HPzDofWsJTvox2KzcZojbccGnSrgkVCpIkFcdaorblJFg8VctHzlapBs1Ztf9bTw+K01CUS07dvaqMh3TqKulSSTjPHWqCgGdnzkKOtYLGxbDkZoxnEYqR4j5e4HPqKrwncAO2M1X1jVRp+lXFxn7inHu3QCu6E3JKzIaC1lF3qk+05jtgIh/vnlj+AwPzrXUYArnvB0LLoUMshJknZpXJ7kmuhLAZxXcpIgimYjPNVd3HWpJnLHFQgcYp81xjht7U4AGkGFpDPt4QZNaIQ9YiRnoPemng4Tk+tIFdzmRjj0p5dEHFWmAqRfxOeakyAOwquZ93Sk5PJouBM0uKjMrHpUeRn5qQydhRcB3Pc0vHrUYJanbT6UwMX6ikKelafA/gX8qRljPWJfwzScAMsxjNJtx9K0Ht42HykqffkVA8DpzjI9RzUOLQ7lXbijDdRU2zIppX3qRipJ0z1rQttQkj4Ylh655FZjLzQMryDRewHUwXyTphiDnof8aZcQYO5eRXPw3BQ5U4Na1tf5Xa3I9KtSuTYgkjK5x0qpIpwM9K2HRJlLIefSqEsRGR+lRJAUVcxuCO1WpDlROn4iqknTp0p1t
PtYo3KNXJOdtGWlcllUMBIvRuv1oGVOD1FOWIiRIz91zgH0qS6VoNwaMgKeJQMgfWvExGK960TeMRpPmqMnD4796JIQsf7s5lGAQT61YeSOS8SFwFkH3WHRuKq6fFJdajMZ3aMRtgqByT269q4nUnJXb2KskMjUiIS7sl32hfTtWhaArIQ42kdaaY/7PvzG6+dFMSWfHC91+nvS2e+Sab7QORNsUZwG7jn0xmpc3a6FYnDpC4lDEl/4G6DtVa5UqRDEADKTj2Hqas3Nkz6hGZGxCFJLL/FzwKluIykkQgQtC53N3Kn0+maxUrNMvRoiEZWPapAJ71keItPkvdPYiNnjg+cQjOZT/8AW61uOY1cGUcryFzUgkZ0JVsDtXfDGWmm9kZuGhX0qJLXT4IACuyMfKe1WZJQFOKiKlSCWycfnUUjjua9ali41PhZk42EbLHNISFFM83nA5pVXPLV3Ql2IaDBf2FOykQycUySUIMDrVZi0h5NbJiJnuGc4WkVWP3qYo29OtSKrt34q0wHhgo4GaCx/CnLCw561OkJP8NWIrBd/apVtj3qysI74FSZjTimBElsO1S+QKQzf3cCk3v60AUiqEcVG0R7DNPIZT92kLY5yRWgFd0I7UwMyGrLHPYGoXQHkVLAYYUlGUwj/oarMhRyrqQam3FDU6yRzoEl7dG7ioaTAzyuR0phQ+tXJreSLn7yHow6GoNhas2h3K5UA5zSrIUI5qQx5qIoBwRUMZehvMEZbB7GrS3Mdy/lMQk3YH+L6HvWJnHHpSs0c0ZinBKeoOCp9Qe1Tz2CxcvY2jY8VXgYMrhhhf73pTY728geO3ukW9tXztuPusvfDe9aECQmMqE3I/OD1FeVmFaKVlua049yNLooVjLD5MMcjORmrZN3LfmHzVjg2bn3LkMvp9TVeS6htbNXSNTk5RiPSrdhdx3ds0lztIcHJHHHt714cr/FY2JTbwGzDJ80kBym48/5xTZHzLFLLEygryw43+lFvJa/YZF52AZEh5IIpkN6k2yCcOcHdtIxzUaktl10gup02zMqlcFRyM0omRElhk25QYGBzu9qqLav9rl+ySII+q7mzz6UxHiXzYbkL9oHzGQHnd2x7VLj0Fc0Fml+y5kjbY2OT/CakaRSEMY/eE7do71krqBkgWNllCzMOdp/P0q35aQPEYHd9wKlSMnjvUuFtykyeW1ju51WQsjIm38e1V5LWRSiGcDy+Hx/ETzmpxK0kgl2ERYwzHru+lJe2pn8trc4kzyC3BHrSTa0ZRG7IkQIbcvTJ65qsUeZ2H3VXqT6065kkSfyIrZiQMFmHHHU06TKWyq7ZbGSfU16GB91uUnotTOYxIVU/eyaSRivFAxgHOaGw3yn8K9+hVhWjeBi01uQFWY05YSe9L5wXjbUiTqeq4+ldcSWSRwkdRmp1RVGW4qITf3TxSg7jljWqJJRJGvTrSGZj0OBTfK3Hini3brV2Ab5jNxTgCetSCE9+DR5ZxinYBAqjmjzVpPKOOtHlU7AVRLwOhoyHFZyuw6VMsz9aq4FghajYYHBpPNzzRvDD0pMCB8HtioiQOhqV1Peq0kZBz2qWBbgu2i44ZT1U9DVwWttdjdAxjfvH/hWGWZDx0qWO4dCGBII7ip5h2L82nlTgvtP+0KrPZSjkbH+jVft9VhnTyrtQQeN4qO7025C+bp06TL/AM8pDj8mH9RSaT2AyJo2X7yEH3FVJJFUdeadN4hlsJvK1Kxnt+cbiNy/nTzf6XerkBTnupK1yVdFcpDdME8935SofKP3s9vetcmCxTZcDcm4/Oh5APrVWK4kuA0dtHjbznODTzAbiaGV0EinO5Dkfn+NfN4ibqTvLRHTHRE8losVspkKT23VY+4z3z9Ka8FvayRyQiT7G/OGHCmp44omsnW5LbsHdGrcLjgfXtQdQVdOkhYeblNqoozx9K57vYTIbmWAT2zoGUOSGUDCk9jS3Fwl7fpsuFjZUAdgM5PSobm/hm
sIbZnVmaQEJjgge/arN1FHdxWoh8qBgx3N/s/TuelVta5DZWN79gmliMgaVMkcEBqnhube4s5FmTMjjcGbruPpisvVIn0u5jE9wkz3PAcLjaKkb7Pp8kEkL7lkByHP3SMcj3q+RWTW7Iua0OpLJYzQyZ+VMFAMMD2xn3plnfva3Dw3aeXMg4UntjrVC9vXk8m+ijXbG20kA7m96fNqCSz29y6EmL5Hcp0zjGah0tNtylI14dTVpZHPIz90jk1Na5aLzvNBc/MExwB6fWq7zW63cExVBI8fGO/PFSXksUcgMZbMv+s2jhPeudxWyNUyO7vfOuo0BYKAGc+vtUVwr3upCFNywxrukfH8q02mha0kAjUqqZB6Z46VAl15kGyFQCVOBn+tVSqcmtgavoQtDDgorlSOnOahkzCu6QjaOd3bFPQmGVEeAmM43yZ6+uKknaPa8IAdG4OfSvewlanJ6JL0MZJmb9ttWOVcyeyKT/TFC3Fy5/0fTZCv964kWIfhjcf0qyMRjCKFHsMU1mYmvWi0ZsehuMfvPIiOekbGT9SF/lUhl2+hqDDkU3Yx5NbJiLH2kjocU8XLf3jVUITRsNXcRdE7H+Ol85/7xqmEYdDThuHencC557g/epftL+oqllqTLU7gQKvNPCkHBNOVkIG5amVI2Aw2KYEITqKTYQfSrnkHqMGlMB/u8UgKTKfc00oSORV4REdRQbfnikwMxoh0IqJodp+WtY24I55NNNvj+GpaAx2hYcg0+C5ntmzHIw9uoNXmtjk4FQSQbe1ZO62KRZTVbe7Qw30KAHuRlTUN5YlSj2RhEWeU8sFT9DVCZAB2qvDcSQMfLkIH90nINcmJqtQa6lRjqaUTNNI1rtWJ8E7umfUVIoffHapcA7Ry5GCB/WoLC4juhKZGjSUHaoc4/HPTFVPImsbo3VyywhMjy2OSwPpivm3FuTT3OnoXLuAxTC3FzuWYFtxGDwelN06eHTmfjYe4cckVMyiOSO4nlWSXd8qfwjIqS6uYZb+1V9nmxhjz9OKm+ljNmJetBJqdw8mYlAHl4UqPekt5tQnuVeOESW6ggEN6VY8Tajb6nHBYxnE5bJbHQVDpMdxbXCWECmYjJGDwfqa6Uv3d2tfP8yGJBqcWpTvHdw7Y0G1S33vfH5VB/ZN5GrtegPDJ8lsQ3IPbP1qxc6PEbGW4Fw0FxHlnjK8euKqy67P/AGfBEkb/ALyQYJGFBHaqjr/D2/IhmlYvcaVcpHeRqpC5A/hI+vrVy1vree/vgPnWQj91jqMAdPrVKG6XVZYoZ2ZPJ+YHg7varl8sVlqdvNZkuZ1KMMZIA57VzySbs9xok0/ZCstvdQsuCRtk6qvbBrQWxKwFldjC7fOCfnVewqnA1trLkytiZMojdCh9SO/NT28k8pksN+LhfldsHGPUf0rCd73+81iaBitksnOPlRSWUnhlxVKxaJnCR2hjTqrO2SfpT2tJlb7MzLJCQCW3YJGf8aa8aWDhZWdl6RcYI+prJbWuakrNiCZdwfa3y4+g/wDr1EkYI5/OkjCG1OxtoLDrT9siDBU49ua9fApaNGM77DvIHrxSiFD1wKZ5rZ9KUEsc170DFj/JGeBSi3p6c8ZqYLnoea3RJWNsMcUwwe1XQhPUU7y+MVYGeYcUhirQ8oDjrSGIdSKYGb5dJ5RrQMIPQU3yPaqAykjPpU6R1l29zOhAT5h/d61s28jSLl49n1NNO4CqpHQ08zGPgcmplVT3psiLTsIbHdIxw6496tCNSuQAR6g1mSpg8Ypsd3JC3ysfpSGanlD0pjBV6kVB9viuE2Sh4mP/AC0jP9Kgk02WXm3vkm/2WO1v8Kl+QE8kkXr+VVnngHVAfqaqTaZqEWS8Dkf7Jz/KqLiQZDRupHqprCU5LoNIvy3cfRUT8qpl43kG9FC9ziqbS7eM0eflSvXNefi5TlHQ1glcnu1DeXFbhDvPDHt70+5toZWG26jeeDBUyc+Zj2HP6UkMUJieGE
M08gyAR04/SqcNtDYXsVzPdr52SPKAzgnjqK8OPrt+JsTBvPuRDdRPEoG5se/Sori2jsZZFeYusg3K5649KtXLPeX0QikChMh3xnI9qy9ctZY9RiWdzJbsu5SOBmrpq7tsZyNEHTbfRZF8tRJtIEnVmPaq1ldtpzjdJkS4IYDkN6fSqul6XaXBkMrtuJIQZ4X3qndLKLuWISGUQpuDIMFe3Pr2rRQjJuN7kGhrerx3LyNEMoRtlYfxN7Cqtiw1eeGzm/dQRfOXHXPQAVGLL9y7yFEeNcqAfvZ9ahluvs9ukcKmK5XBwBwRnk1pGKS5Ybg0dJZG0jtpIZo8BWPzg859aS2ubi2vY5bmMIJlzC56FQeh9D7VmWIE9rJO75kzkN0GRWhLNeajosyx2jTBfmIf5SPcVzyjrZiHyXTx628kUQ8iYgGQdA2Oa2pIUWKO5tpHNxwjlv4/rWNp0M2raaFtisYUhgX7MK0tPiluoj9omEc8ZZQo6bgec1hVVvloaRZZmWW1jFzNIhiYbWIJ+Q1E01rqN0oeSR1UcBBhc/jz+lWbWKa8jIZAsQOJFc8v7AVS02IxFHkCozE+WhOSR3rFWs31Rqi7A0LRSLblWBGACw+U++Kqi4uI5WRAzhTjIU4NTzzWUEx/erHu6j3Hf9azl1GVh8x4J4NdeCS573aImaS3Of8AWwMPcCp0MMnKPj2NZi3b/wB6p470j70aNX0tKatqc7RpCNlORyPapApzmq0N1bN/ejPvyKuo6EcSK1dSVyRytinja3UUmFz1FL8o6EVdgHgKKXYDzTMgdx+dAkIosA4xDsKPJpPN9qPNHrTA5tBs4Xge1So5HrTTG5HCmmiKQ1QF2OUHgnFSk8etU44znk4NW1QbeTzTEV5Bmqrrz3rSdFxxUDL3xUtDKBB7UbmXoSKtNGOuKYYx6VLAE1C5i+5Mw9qe2r3OPnWN/qtRGIdqY0Xqal3AWXUYHH72yjPuKoSzacTuEBQg9hT5oevFZ8sLHPFcde7VmXEn/eeYtzbdeQDnFPu7KVZo5UkhLxsCD94OfbHWqUbSxgx5zGex7VYa2l/s6NxM0pY/u1QZI+mK+dqRcJnQndGhePGbQO2yMR/MvOM//rrOtymqTML5CsbMNik42iq0cTx2ciXAledJVJjbkIvX86YJUmuw5Lquwgdt/wBPWkoct7feTIfYQ20cs+/zwsbsTsU4I7HNJLo9zY28+oROHSVsuO6qff1ratLiA6O0cpHkiPGdwB4//VVW0uWmsxazo/lzZBcnoMfzo9pK7ZNjKvbeC3KusjzRsnC9dvp+FVHhj+zRsvF2W2kdSR3/AAxW42nyaXL9ktla5gnXO6Q8r+fasW3MqXbwsoM7AgZ4xj6/Wt4Surp3GQ6fFK8skUrEKW+6p4BroLLUpo7l7JciUJkuT8uK5mO21CPUGO8tKnLFDnIqzHdOmpSOJCAVG5mHJp1aanfroTY6K0uDpz/ZCWIGXDp3z6+9WLF7i9vLohkiBfKt6ZHSsHQonvdRmubySRguAFJxx9PSukhsltdRk+yN8kxBCHsTXHWSi2upcS3LMdJkiSWT5X4STt+NUY1k/tF4CVBtiWjdTxIjc8ep57U3Uzf7lR7fdCGyCg3dK0LuaKysYVcDzCwWM7eVz3rFaLu2aIpzWUcTPHdReZFIN6S9Gz3z71Sm0+SNRJC5mg6hh1H1FS3LzvcrNNcJKpGFIOFGDyMdj0qJFubWZpbdv3bHJj6r9R6V6OC3tIzkuw1M8c1MrleCaso0F4uWType4Hr/AFpklnJGN33k/vCvbhDsYtiLLz1qZZcdDVbbkcCnAHtW8SS6s7DoxqZZz6ms0PjrUqyVqmwNETE9zThKezGqKy89alDe9UmIti5cd81ILs+gqiGpd4qrgSC4hI+9SfaYecGssI1OCMO2adwNDz4ienNPE47CqSIT2q1FCfwpgTGTd3xTTz3qVIgKlEQ7UxFXy/SnCLParax47VMsak
dKVgKItQad9i9av4A6CgjNS4gZUlkDxjNUbizCg5wAOSa1r6+t7NP3jZbsg5JrltR1Ca+yp/dxdkHf61hU5UtSkmV5prWRinmYj5yw74qKUyCNYbW4KKRwu7H41UWFmc47dfap7aJ5bwY+Zdp3c8189iYWnzNnTF6WHWt3BZwtbvJvn5J2Atkn1Nal49tdaKN6JuhT5Gz0z1Hsax4IBaXztIuUYbd392kupLaK4SRJVZUO5152nH9a5nFSkmhstyWMFxpyQwnDMR5f/wBeoL++utPgt4ZIkw7ArKG+X3xVgSpNJBLYoZTgsQPlO08d6ytZvTq9qbeFNkdq4355PPAIPpnj64rSlFylaW35EMty31ydUTYyOHUDviMU24kuBDJbG0mdo23m5Vcqozyc9/oKct5ZyaD5EETmXAwEUhg44zmtXSdSh/stLedlLbPLkXqzk55x1NKT5FdR2AyIMLcuYZwUCgs55JaibRp49MM7zI4yGYAc8ntV2Hw1DFpzm2u9kj/vA7DgL6GmWp1P+yWHA2pmNxxkgcAj86PaXd4PqFjVleCLT2n2YaNRtK8MQKp6XqM020mPczyEluwH8+P6UzTFuDp9159s8t42XiMg4ZSMEfzqTSrhNTsrqyMUdtK6HDdAD/jWDgkmnrqWhZXv4tUMrMwRslGU5GPatO5gklaK4wJjt+cDnI6jHrWZps88McNnKMCJXRyeQw7Y/WpVvbqynIIV4WO5UIxtB7D6UKnzTUU7D5rIsX1vFqESmJlSc/dbOAf9k+lYkTy20xjdWRgcMp4wa12tbbUpfPspzbXf8SP91/qP6ipJrZpcRX0RhnAwsvUH8e4r2sNhfZxtuYylcpiXOD1PrV63vcEBzj3qhJDLaNslXGfusOhpEkB4rtjeJD1NkxRycj5Se46GoXt2U5x+VVIrhounK+ladtcLKOPxU9RXRFpk2KZiI5IyKaU9K1WgBGV6GoXg9sVaQijgqc1IrVIY8cVGVwaYDw2aN3tUfIOMUZx1FMColwelTLcetY8EzMil12MRyKtI/vVJjNVJ6sJcZ4BrIV8fxVMk2OlO4jYS4Gcc5qdJWx/jWRHcE9MCrUcjN1ancDSDnuRUitVISKg3OwAHcnFQTauqjbANx/vEcUm0hWNZ5khj3yMFHqayLzV5HBW2XaP75/pVCSaS5cF2Lv2H/wBanraTN94rEPVzj9Kzcm9h2KMgLOWclmPJJ5NMispbs/IpCeta6WllEMyFrh/TotLPMzx7eEjHRF4FZOn3Hcx5beCBcMd2OiL0/E96zri6k6Q/JngBa0bqMYJNUU3JcBYY98jDCrjrXFXg+iNIsqbbrbHGznjgA9s9/rVu6sopIo7bcFt4z8zL1Y+/tV2a0EUCNcYeQDkZ4qqLh50MBUfLli/oo7f0ryqlKpFq+5opJk8NjLqdvLcRwxRAjYrg4woHU+grHmt1tdFuJIiRbtIPNbPMuD8oX2J5/Crbx3Oo6WzjfHHI5IROFYD19aFtfK0R/MkEqRfejJ/iPQD/AD61nB8ujfXYe5HGFn8PGHTS0JmfLGQ8r0OM1c02EmTUXhnie7eMJFt5wcYYj86ppNqEcuLe2BtLlVBQj7uM9PSrSSHS7iK8MLbZplXy2Od2Rg49MD9cU5t2aXXX/hwSGH7Tp2gW0UluxYytG28cBc5x+PrWxFdb7dmtoZjE6kR5GSB0/Qg1nWLtNrV2ZhvhY/JC/IxngAU2Rv7S+aKV7ezAbyVX+IBiCD7ZH61lOKk7P1v6jSJbfUdUknaARIG4UkKScDv+NXLCG2mvJpEjLZdmDdmGc7x7+vvVH+0jZazCiZaBIkDKRyD3x+OKtIrmUgYJD70QDgxtnHPfP9KicdNrXGMuruKe/jmRCPLYDPTI71PehhJluR2rLinxNIjLlCxwO4rbhxc2a5O5lG0n+tehhsLrcznIz1GDuGQ3Wtez1QhPJuR5kXTJ61mvGUbHSmjj8K9SneBk9TpPscFzbk
QuHjP8BPT8e1Yl5pz2bFgCY/UjkfWi3uXhbKMVP1rUi1NpF2yBXHQ5711WjJE6owgeeePrT1ZlYEZBHStZ7fT5udjQk/3Dx+VQPpqE/u7kH03Cj2bWwXLNleiQhJCFY/ka0Gi4rDNjMvRkb6HFalhcvgW9wCrjhWPetI32YmDwioHgz0rTePNQMuKuwjKeEjmm+Wa0mjB7VEYOaLAciBTwxA60pX0oCVBYqyN3qQSEdqjwT7U4KR0paiJ0uGHQVMLyY8LxVUDHWpFzjHXPQDvRdgSmRmO6Qlj6ntVy2s2lAd/kQ9B3NPtLEIFknAz2X0q8TnhapR6sVyEQiJSEG0d/ekEZ/CpxGep6UpPbFOwiuI6jljwOetWSCBmq0uM5zxUtDKM0TSuscalmY4AFaVvYRWEJPDSsPmf/AD2qzY2/lp50g+dhx7CmXj449qhxS1C5g6g5lc/3RU2n6YptHkuB8snUHuOwqWO0Nzcqh+71b6VpXK7k2Lwo4Fc6pJtzY79DnL28nEj+S+yMcAYqsqg2gnmJkVW3yKeuR0rQvbUD5QKqTxFNKmI/vL/hXmYjDato1jIhsZDc2s0wfypSSsag446ULp8MtzBh2PkoN3s3p/WmwxsbSJVGGLFVA75rXMH2BYY4xnAO73NcyoVHzOJfMjOa+hkQpLGY9pZS6KeG7HNPsw9ksdtKo+yWzM5P94MD/ImoJ4AZSSOSckGtJIftGnvGRmRUIX8ulNYZuNkg5tSOaWOe6ilt412ttQkjtmogkmnak7x/xZHPpUGmyAEQsTw4Zfz6V02pWAY7wPxrqw2D91qREp6nLSoRLkjk81oadP5MoVj8j8GmXMJB5FQbSGx2rtjHkehO5vXNvuGQOaznQqeRWjpt2J0+zSn94B8p/vCn3FqRniuvlT1RBkinq5HtTniKE8UzGDQlYLlpZg3B4NSiXFUgTUqscVohFoSZ6GpEmI4PI9KqA5pwY1SYG/DOJUAPWnOtZNvcGNhnpWj52UDKMjvVCGOtR/jUxkVqaVGaAOU8s96UR+lTDHfmjAzUjItgHWkK1IRTcjtSGN2/hWpY2ohAlkHznoD2qCxtwzea3IXpn1rRxk04oQ8ksetSLGKRIwe9TFcAAVYhp4HFAJHbmpQnA9ajkzyAaTArvhs+lMihEtwoxlRyaGHBPartpEIodzfebk1G4EsjhEz7cVkykuxY9e1Xbh9zEdhTLWIPJvP3V6fWolq7DC2hMMOMfvG+8fT2qb7P/e5qdU4J6AcCpjHhQcU7CMO9h3MTj2rLvYtunMv96RR/M/0robtADjHasfUF/dwJ/tFz/L/GuepHdlIj0Wx82VXYfLDkj6mtOa2Lv0qzodvt0/eRy7E/0q48YBAxzVwpJQSBvU5O8tSr9Kdakoy1sX1sOpFZJTyx+NT7NJhczr60MF84TgN86H612cDC+0+Gbj50BP17/rXP3sXnWaTj70R2t/unp+tX/DlySklqT935l+h61UI2k13B7Fe+ttkmMcVjSxlJOK7O8tw65xmudurYhjjtVSgCZQjchgQcMDkH3ro7O5W+t/mx5q/eHr71ze3axJFWLad7aVZU7dR6iiGjBmvPb5BwOaz5IipzitsMlxCJU6NVaaAOpwMGtrEmTjnpSqKlkRgQccios880rDJR0oBpEp7DPIpgOU81agnaM8niqPSnq5HemgNcbW+YdDUm32rNhmKnrxVoS8fepgYGQBTS5zxUXmUbsnrSAk6n1qSOMuwUVEpArRtY9ibm+81CVwJ0QKqqowBxVhIwelMQAnHep1wBgVQiRUxxT9nPJpiMfTJpxbPTigBXGBwaquCRkmpJGO04NVmY49qhgPiTzZVU9Op+lXZZAM1UtPlV3IxuOB9KJZAX46Ck9EBHIST3rQt4wkap36tVS2jMjbyOAePetSKPb1/GiMeoMcibm56CpXXI9qcigCkm4iNNrQDKusFyfwrJvQDPjsigVryYZxnp1rJ5mlyesj5/Ws
ZrSw0dLYw+XYQr6IKc6j0qaMbVC+gpjjn8a3toIpTx74mB61h3URDV00iA8isq8h+UsB14qXEDNs2RmaCU/u5QUb2z3/CqVpI+n34Zlw0TlJB7dDVhl8t8Gm36b9lwOS/yv7kDr+VZtdSkdaCs0QKkFSMg1kXdttdsUnh673I1q55TlPpWpcxZG/HNarVXJOTnt9ueOtVNpXiuimgDBhisua1OCR2qXEdxdNvTbS7HP7p+vsfWtiYY5HQ965sqy9a1dNvRIn2aU8j7pP8AKqi+giSRBIMj7wqm6ZJBGDVyVdrHtULFXODwabGVFJXqKnVtw5pjrtYhuaFyOnSkBIycVHjHFSqcikZc9KYDQeafvPrUJ4NLvNFwME3IHemG9iTlpAPxrg/7QnbrNIf+BGpraR5ZQoyWY4FZ8wHoWnTx3cp2HKJyT2rdjOTWJpUC2tskI6jlj6mtpGAFaoCynB4HNSphScjJqBHwcipN/HuaYibzMdOtG4EYqEMCPencAHFIBJXAXHaoDlwOwpxy30pBjIHYVLGSb9sfHbpUagyuFHUmo5JMtgdBVqyjyTIe/A+lK12BegQAAAcAcVaUHNMRMKKnXGMitCSUDAqG5bjFTZGCap3DDGaljKFw2I5Gz2x+dUrRN17AuONwqzcnMeP7xqPThnUY/QAn/P51k1eSGjpM4HvUTHrTlPHNROeSexrckc/K8duaguIg8RHerIGUP0pjr8ucdKAOcuYjux3zUGzzIJIT1IyPqK1b2Hdll6is4fLKDjvUNDRSs7g2t3FMP4WGfp3rtjiSMHqCK4adNk7pjjJxXV6Ncefp6AnLINppQ7DYyaHDHaao+ScsOtbE6dGFUnXkkVpYkyprXcM4rPeF4XyMjB4I7V0YUEGopLVXBBHWpcRmfDdi4jCScSAfnUchwxp8thhuOMVE0Mu3ruxQMBJng80dDkdKrsxQ8jFOWQ9c0XAtA57Yp3eoVcHvUm71oAR1zUW2pjg0m2lYDxFQc1saGmdQiz25rPEfNa+iri9U+xrGO4Hd2bYArSjO41j2rHArSSTA4rdMC8JNowKcCTjmqqNuFTqcDmmBMCQODQWLcA8UzOeO1Ix7CgALYXimF9qZNVp5xvWFD8xPzH0FEkmSBUtgSpmSRVHVjW7bxhVAHQcVj6aheZpP7vArbT5acUJkwFTj2qBOmTUobkYqhDyflNUJznirjHGRVC4btSYylcHlR7Zo00/6cfZD/MUydssx7dKTTmH21vdD/MVmviGdErfJULHAIzxmlDfKKimPHHrWxJaTlCO9SDBGCO1QxN8n4VMBwPWgDPuo8ZI6VkSRgOCOhreuANprGuEKscdM8UhmZqCbLkH+8oNaHh6fbO8RPDDIqnqfIhbvgjNRafN5F9C3bdistpDOycZ47VQkADlfyrQPIyOtVLlAQHHUVsSVhgZp+3IBFRZwwJ6Gp044HQ0DIZo8jP51QkXYT7VrlQciqVxH19aGBlzqMhu3Q1WeLbyvFXJB1WoFPGKzkNFfJU09Zh3pzpmoHSlcZZ81fWk89fWqmD2pMUcwWPMQvNXrBjHcRkddwFUhVqDggjrWC3A7W2YgYrRjY4rMtTmNT32itJOgreIFtGx0qdSe/Sq0fT8amXk1YibJPPaqVzfAApFye7Ut8zARqCQrZyB3rO6jmonK2g0iS3bMpYntUzP3qCH+OpO4qUNnQadH5dqmep5NaAJ4qvB9xfpU461siCdTlQTUhOce1Rp92pOxoAbI2Kz5z8w/OtB+1Ztx95vpSYFKU5TNRWLldQUeqGpJfun6VBZf8hNf90/yrNbjOkV8pmoJXOCD1Bp0X+pqK4+6a2EXbdsoAatRnjPeqNt91aux0IRDMODWTcja49DWvN1rJuvvGgZl34zAmezGs8NtYEdjWhf/AOpH1/xrPHU1jLcpHbW0omtY3B4YCkccFT0NVNGJOmLn3q3J0rZbEmbJmOQoenUVLBJztqO+6p9aZGTvBpAaQ+
YfSoJUzk1Knf6UjfcNUBiXSFTVHcUc+hrTvvu/hWY4+UVEhkoIYUx0psR+apm6VBRUZcUYFSuKZgUAf//Z"
- },
- "key": " 1"
- }
- ]
-}
\ No newline at end of file
diff --git a/how-to-use-azureml/deployment/tensorflow/tensorflow-model-register-and-deploy.ipynb b/how-to-use-azureml/deployment/tensorflow/tensorflow-model-register-and-deploy.ipynb
deleted file mode 100644
index 7cdbcca4c..000000000
--- a/how-to-use-azureml/deployment/tensorflow/tensorflow-model-register-and-deploy.ipynb
+++ /dev/null
@@ -1,260 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Copyright (c) Microsoft Corporation. All rights reserved.\n",
- "\n",
- "Licensed under the MIT License."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- ""
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Register TensorFlow SavedModel and deploy as webservice\n",
- "\n",
- "Following this notebook, you will:\n",
- "\n",
- " - Learn how to register a TF SavedModel in your Azure Machine Learning Workspace.\n",
- " - Deploy your model as a web service in an Azure Container Instance."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Prerequisites\n",
- "\n",
- "If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration notebook](../../../configuration.ipynb) to install the Azure Machine Learning Python SDK and create a workspace."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import azureml.core\n",
- "\n",
- "# Check core SDK version number.\n",
- "print('SDK version:', azureml.core.VERSION)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Initialize workspace\n",
- "\n",
- "Create a [Workspace](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.workspace%28class%29?view=azure-ml-py) object from your persisted configuration."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "tags": [
- "create workspace"
- ]
- },
- "outputs": [],
- "source": [
- "from azureml.core import Workspace\n",
- "\n",
- "ws = Workspace.from_config()\n",
- "print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\\n')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Download the Model\n",
- "\n",
- "Download and extract the model from https://amlsamplenotebooksdata.blob.core.windows.net/data/flowers_model.tar.gz to \"models\" directory"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import os\n",
- "import tarfile\n",
- "import urllib.request\n",
- "\n",
- "# create directory for model\n",
- "model_dir = 'models'\n",
- "if not os.path.isdir(model_dir):\n",
- " os.mkdir(model_dir)\n",
- "\n",
- "url=\"https://amlsamplenotebooksdata.blob.core.windows.net/data/flowers_model.tar.gz\"\n",
- "response = urllib.request.urlretrieve(url, model_dir + \"/flowers_model.tar.gz\")\n",
- "tar = tarfile.open(model_dir + \"/flowers_model.tar.gz\", \"r:gz\")\n",
- "tar.extractall(model_dir)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Register model\n",
- "\n",
- "Register a file or folder as a model by calling [Model.register()](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model.model?view=azure-ml-py#register-workspace--model-path--model-name--tags-none--properties-none--description-none--datasets-none--model-framework-none--model-framework-version-none--child-paths-none-). For this example, we have provided a TensorFlow SavedModel (`flowers_model` in the notebook's directory).\n",
- "\n",
- "In addition to the content of the model file itself, your registered model will also store model metadata -- model description, tags, and framework information -- that will be useful when managing and deploying models in your workspace. Using tags, for instance, you can categorize your models and apply filters when listing models in your workspace. Also, marking this model with the scikit-learn framework will simplify deploying it as a web service, as we'll see later."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "tags": [
- "register model from file"
- ]
- },
- "outputs": [],
- "source": [
- "from azureml.core import Model\n",
- "\n",
- "model = Model.register(workspace=ws,\n",
- " model_name='flowers', # Name of the registered model in your workspace.\n",
- " model_path= model_dir + '/flowers_model', # Local Tensorflow SavedModel folder to upload and register as a model.\n",
- " model_framework=Model.Framework.TENSORFLOW, # Framework used to create the model.\n",
- " model_framework_version='1.14.0', # Version of Tensorflow used to create the model.\n",
- " description='Flowers model')\n",
- "\n",
- "print('Name:', model.name)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Deploy model\n",
- "\n",
- "Deploy your model as a web service using [Model.deploy()](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model.model?view=azure-ml-py#deploy-workspace--name--models--inference-config--deployment-config-none--deployment-target-none-). Web services take one or more models, load them in an environment, and run them on one of several supported deployment targets.\n",
- "\n",
- "For this example, we will deploy your TensorFlow SavedModel to an Azure Container Instance (ACI)."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Use a default environment (for supported models)\n",
- "\n",
- "The Azure Machine Learning service provides a default environment for supported model frameworks, including TensorFlow, based on the metadata you provided when registering your model. This is the easiest way to deploy your model.\n",
- "\n",
- "**Note**: This step can take several minutes."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from azureml.core import Webservice\n",
- "from azureml.exceptions import WebserviceException\n",
- "\n",
- "service_name = 'tensorflow-flower-service'\n",
- "\n",
- "# Remove any existing service under the same name.\n",
- "try:\n",
- " Webservice(ws, service_name).delete()\n",
- "except WebserviceException:\n",
- " pass\n",
- "\n",
- "service = Model.deploy(ws, service_name, [model])\n",
- "service.wait_for_deployment(show_output=True)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "After your model is deployed, perform a call to the web service."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import requests\n",
- "\n",
- "headers = {'Content-Type': 'application/json'}\n",
- "\n",
- "if service.auth_enabled:\n",
- " headers['Authorization'] = 'Bearer '+ service.get_keys()[0]\n",
- "elif service.token_auth_enabled:\n",
- " headers['Authorization'] = 'Bearer '+ service.get_token()[0]\n",
- "\n",
- "scoring_uri = service.scoring_uri # If you have a SavedModel with classify and regress, \n",
- " # you can change the scoring_uri from 'uri:predict' to 'uri:classify' or 'uri:regress'.\n",
- "print(scoring_uri)\n",
- "\n",
- "with open('tensorflow-flower-predict-input.json', 'rb') as data_file:\n",
- " response = requests.post(\n",
- " scoring_uri, data=data_file, headers=headers)\n",
- "print(response.status_code)\n",
- "print(response.elapsed)\n",
- "print(response.json())"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "When you are finished testing your service, clean up the deployment."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "service.delete()"
- ]
- }
- ],
- "metadata": {
- "authors": [
- {
- "name": "vaidyas"
- }
- ],
- "kernelspec": {
- "display_name": "Python 3.6",
- "language": "python",
- "name": "python36"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.7.0"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
\ No newline at end of file
diff --git a/how-to-use-azureml/deployment/tensorflow/tensorflow-model-register-and-deploy.yml b/how-to-use-azureml/deployment/tensorflow/tensorflow-model-register-and-deploy.yml
deleted file mode 100644
index 99ffc747f..000000000
--- a/how-to-use-azureml/deployment/tensorflow/tensorflow-model-register-and-deploy.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-name: tensorflow-model-register-and-deploy
-dependencies:
-- pip:
- - azureml-sdk
diff --git a/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb b/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb
index 7f0702b3f..b21e3fe01 100644
--- a/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb
+++ b/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb
@@ -58,7 +58,7 @@
"\n",
"Problem: Boston Housing Price Prediction with scikit-learn (train a model and run an explainer remotely via AMLCompute, and download and visualize the remotely-calculated explanations.)\n",
"\n",
- "|  |\n",
+ "|  |\n",
"|:--:|\n"
]
},
@@ -672,7 +672,7 @@
"source": [
"# retrieve model for visualization and deployment\n",
"from azureml.core.model import Model\n",
- "from sklearn.externals import joblib\n",
+ "import joblib\n",
"original_model = Model(ws, 'model_explain_model_on_amlcomp')\n",
"model_path = original_model.download(exist_ok=True)\n",
"original_model = joblib.load(model_path)"
@@ -692,7 +692,7 @@
"outputs": [],
"source": [
"# retrieve x_test for visualization\n",
- "from sklearn.externals import joblib\n",
+ "import joblib\n",
"x_test_path = './x_test_boston_housing.pkl'\n",
"run.download_file('x_test_boston_housing.pkl', output_file_path=x_test_path)"
]
diff --git a/how-to-use-azureml/explain-model/azure-integration/remote-explanation/train_explain.py b/how-to-use-azureml/explain-model/azure-integration/remote-explanation/train_explain.py
index c05294e1d..c3dff4e67 100644
--- a/how-to-use-azureml/explain-model/azure-integration/remote-explanation/train_explain.py
+++ b/how-to-use-azureml/explain-model/azure-integration/remote-explanation/train_explain.py
@@ -7,7 +7,7 @@
from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient
from sklearn.model_selection import train_test_split
from azureml.core.run import Run
-from sklearn.externals import joblib
+import joblib
import os
import numpy as np
diff --git a/how-to-use-azureml/explain-model/azure-integration/scoring-time/score_local_explain.py b/how-to-use-azureml/explain-model/azure-integration/scoring-time/score_local_explain.py
index c102f909d..31f892750 100644
--- a/how-to-use-azureml/explain-model/azure-integration/scoring-time/score_local_explain.py
+++ b/how-to-use-azureml/explain-model/azure-integration/scoring-time/score_local_explain.py
@@ -3,7 +3,7 @@
import pandas as pd
import os
import pickle
-from sklearn.externals import joblib
+import joblib
from sklearn.linear_model import LogisticRegression
from azureml.core.model import Model
diff --git a/how-to-use-azureml/explain-model/azure-integration/scoring-time/score_remote_explain.py b/how-to-use-azureml/explain-model/azure-integration/scoring-time/score_remote_explain.py
index 7ffc21b34..48c628766 100644
--- a/how-to-use-azureml/explain-model/azure-integration/scoring-time/score_remote_explain.py
+++ b/how-to-use-azureml/explain-model/azure-integration/scoring-time/score_remote_explain.py
@@ -3,7 +3,7 @@
import pandas as pd
import os
import pickle
-from sklearn.externals import joblib
+import joblib
from sklearn.linear_model import LogisticRegression
from azureml.core.model import Model
diff --git a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.ipynb b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.ipynb
index fb3635fc3..f18033175 100644
--- a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.ipynb
+++ b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.ipynb
@@ -165,7 +165,7 @@
"outputs": [],
"source": [
"from sklearn.model_selection import train_test_split\n",
- "from sklearn.externals import joblib\n",
+ "import joblib\n",
"from sklearn.preprocessing import StandardScaler, OneHotEncoder\n",
"from sklearn.impute import SimpleImputer\n",
"from sklearn.pipeline import Pipeline\n",
diff --git a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb
index 64b4e187d..c3a86eb2a 100644
--- a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb
+++ b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb
@@ -63,7 +63,7 @@
"7.\tCreate an image and register it in the image registry.\n",
"8.\tDeploy the image as a web service in Azure.\n",
"\n",
- "|  |\n",
+ "|  |\n",
"|:--:|"
]
},
@@ -325,7 +325,7 @@
"source": [
"# retrieve model for visualization and deployment\n",
"from azureml.core.model import Model\n",
- "from sklearn.externals import joblib\n",
+ "import joblib\n",
"original_model = Model(ws, 'amlcompute_deploy_model')\n",
"model_path = original_model.download(exist_ok=True)\n",
"original_svm_model = joblib.load(model_path)"
@@ -352,7 +352,7 @@
"outputs": [],
"source": [
"# retrieve x_test for visualization\n",
- "from sklearn.externals import joblib\n",
+ "import joblib\n",
"x_test_path = './x_test.pkl'\n",
"run.download_file('x_test_ibm.pkl', output_file_path=x_test_path)\n",
"x_test = joblib.load(x_test_path)"
diff --git a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train_explain.py b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train_explain.py
index f13d1acae..f3629b98e 100644
--- a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train_explain.py
+++ b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train_explain.py
@@ -6,7 +6,7 @@
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
-from sklearn.externals import joblib
+import joblib
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-azurebatch-to-run-a-windows-executable.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-azurebatch-to-run-a-windows-executable.ipynb
index 338d68225..48b106db0 100644
--- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-azurebatch-to-run-a-windows-executable.ipynb
+++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-azurebatch-to-run-a-windows-executable.ipynb
@@ -252,7 +252,7 @@
"source": [
"binaries_folder = \"azurebatch/job_binaries\"\n",
"if not os.path.isdir(binaries_folder):\n",
- " os.mkdir(binaries_folder)\n",
+ " os.makedirs(binaries_folder)\n",
"\n",
"file_name=\"azurebatch.cmd\"\n",
"with open(path.join(binaries_folder, file_name), 'w') as f:\n",
diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb
new file mode 100644
index 000000000..78b3dc4a6
--- /dev/null
+++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb
@@ -0,0 +1,510 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Copyright (c) Microsoft Corporation. All rights reserved. \n",
+ "Licensed under the MIT License."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Showcasing Dataset and PipelineParameter\n",
+ "\n",
+ "This notebook demonstrates how a [**FileDataset**](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.filedataset?view=azure-ml-py) or [**TabularDataset**](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) can be parametrized with [**PipelineParameters**](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelineparameter?view=azure-ml-py) in an AML [Pipeline](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipeline(class)?view=azure-ml-py). By parametrizing datasets, you can dynamically run pipeline experiments with different datasets without any code change.\n",
+ "\n",
+ "A common use case is building a training pipeline with a sample of your training data for quick iterative development. When you're ready to test and deploy your pipeline at scale, you can pass in your full training dataset to the pipeline experiment without making any changes to your training script. \n",
+ " \n",
+ "To see more about how parameters work between steps, please refer [aml-pipelines-with-data-dependency-steps](https://aka.ms/pl-data-dep).\n",
+ "\n",
+ "* [How to create a Pipeline with a Dataset PipelineParameter](#index1)\n",
+ "* [How to submit a Pipeline with a Dataset PipelineParameter](#index2)\n",
+ "* [How to submit a Pipeline and change the Dataset PipelineParameter value from the sdk](#index3)\n",
+ "* [How to submit a Pipeline and change the Dataset PipelineParameter value using a REST call](#index4)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Azure Machine Learning and Pipeline SDK-specific imports"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import azureml.core\n",
+ "from azureml.core import Workspace, Experiment, Dataset\n",
+ "from azureml.core.compute import ComputeTarget, AmlCompute\n",
+ "from azureml.data.dataset_consumption_config import DatasetConsumptionConfig\n",
+ "from azureml.widgets import RunDetails\n",
+ "\n",
+ "from azureml.pipeline.core import PipelineParameter\n",
+ "from azureml.pipeline.core import Pipeline, PipelineRun\n",
+ "from azureml.pipeline.steps import PythonScriptStep\n",
+ "\n",
+ "# Check core SDK version number\n",
+ "print(\"SDK version:\", azureml.core.VERSION)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Initialize Workspace\n",
+ "\n",
+ "Initialize a workspace object from persisted configuration. If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure the config file is present at .\\config.json\n",
+ "\n",
+ "If you don't have a config.json file, go through the [configuration Notebook](https://aka.ms/pl-config) first.\n",
+ "\n",
+ "This sets you up with a working config file that has information on your workspace, subscription id, etc."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ws = Workspace.from_config()\n",
+ "print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Create an Azure ML experiment\n",
+ "\n",
+ "Let's create an experiment named \"showcasing-dataset\" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Choose a name for the run history container in the workspace.\n",
+ "experiment_name = 'showcasing-dataset'\n",
+ "source_directory = '.'\n",
+ "\n",
+ "experiment = Experiment(ws, experiment_name)\n",
+ "experiment"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Create or Attach an AmlCompute cluster\n",
 + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your pipeline run. In this tutorial, you get the default `AmlCompute` as your training compute resource."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Choose a name for your cluster.\n",
+ "amlcompute_cluster_name = \"cpu-cluster\"\n",
+ "\n",
+ "found = False\n",
+ "# Check if this compute target already exists in the workspace.\n",
+ "cts = ws.compute_targets\n",
+ "if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':\n",
+ " found = True\n",
+ " print('Found existing compute target.')\n",
+ " compute_target = cts[amlcompute_cluster_name]\n",
+ " \n",
+ "if not found:\n",
+ " print('Creating a new compute target...')\n",
+ " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # for GPU, use \"STANDARD_NC6\"\n",
+ " #vm_priority = 'lowpriority', # optional\n",
+ " max_nodes = 4)\n",
+ "\n",
+ " # Create the cluster.\n",
+ " compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)\n",
+ " \n",
+ " # Can poll for a minimum number of nodes and for a specific timeout.\n",
+ " # If no min_node_count is provided, it will use the scale settings for the cluster.\n",
+ " compute_target.wait_for_completion(show_output = True, timeout_in_minutes = 10)\n",
+ " \n",
+ " # For a more detailed view of current AmlCompute status, use get_status()."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Dataset Configuration\n",
+ "\n",
+ "The following steps detail how to create a [FileDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.filedataset?view=azure-ml-py) and [TabularDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) from an external CSV file, and configure them to be used by a [Pipeline](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipeline(class)?view=azure-ml-py):\n",
+ "\n",
+ "1. Create a dataset from a csv file\n",
 + "2. Create a [PipelineParameter](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelineparameter?view=azure-ml-py) object and set the `default_value` to the dataset. [PipelineParameter](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelineparameter?view=azure-ml-py) objects enable arguments to be passed into Pipelines when they are resubmitted after creation. The `name` is referenced later on when we submit additional pipeline runs with different input datasets. \n",
 + "3. Create a [DatasetConsumptionConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.dataset_consumption_config.datasetconsumptionconfig?view=azure-ml-py) object from the [PipelineParameter](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.pipelineparameter?view=azure-ml-py). The [DatasetConsumptionConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.dataset_consumption_config.datasetconsumptionconfig?view=azure-ml-py) object specifies how the dataset should be used by the remote compute where the pipeline is run. **NOTE** only [DatasetConsumptionConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.dataset_consumption_config.datasetconsumptionconfig?view=azure-ml-py) objects built on [FileDataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.filedataset?view=azure-ml-py) can be set `as_mount()` or `as_download()` on the remote compute."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "tags": [
+ "datapath-remarks-sample"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "file_dataset = Dataset.File.from_files('https://dprepdata.blob.core.windows.net/demo/Titanic.csv')\n",
+ "file_pipeline_param = PipelineParameter(name=\"file_ds_param\", default_value=file_dataset)\n",
+ "file_ds_consumption = DatasetConsumptionConfig(\"file_dataset\", file_pipeline_param).as_mount()\n",
+ "\n",
+ "tabular_dataset = Dataset.Tabular.from_delimited_files('https://dprepdata.blob.core.windows.net/demo/Titanic.csv')\n",
+ "tabular_pipeline_param = PipelineParameter(name=\"tabular_ds_param\", default_value=tabular_dataset)\n",
+ "tabular_ds_consumption = DatasetConsumptionConfig(\"tabular_dataset\", tabular_pipeline_param)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We will setup a training script to ingest our passed-in datasets and print their contents. **NOTE** the names of the datasets referenced inside the training script correspond to the `name` of their respective [DatasetConsumptionConfig](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.dataset_consumption_config.datasetconsumptionconfig?view=azure-ml-py) objects we defined above."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%%writefile train_with_dataset.py\n",
+ "from azureml.core import Run\n",
+ "\n",
+ "input_file_ds_path = Run.get_context().input_datasets['file_dataset']\n",
+ "with open(input_file_ds_path, 'r') as f:\n",
+ " content = f.read()\n",
+ " print(content)\n",
+ "\n",
+ "input_tabular_ds = Run.get_context().input_datasets['tabular_dataset']\n",
+ "tabular_df = input_tabular_ds.to_pandas_dataframe()\n",
+ "print(tabular_df)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Create a Pipeline with a Dataset PipelineParameter\n",
+ "\n",
+ "Note that the ```file_ds_consumption``` and ```tabular_ds_consumption``` are specified as both arguments and inputs to create a step."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "train_step = PythonScriptStep(\n",
+ " name=\"train_step\",\n",
+ " script_name=\"train_with_dataset.py\",\n",
+ " arguments=[\"--param1\", file_ds_consumption, \"--param2\", tabular_ds_consumption],\n",
+ " inputs=[file_ds_consumption, tabular_ds_consumption],\n",
+ " compute_target=compute_target,\n",
+ " source_directory=source_directory)\n",
+ "\n",
+ "print(\"train_step created\")\n",
+ "\n",
+ "pipeline = Pipeline(workspace=ws, steps=[train_step])\n",
+ "print(\"pipeline with the train_step created\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Submit a Pipeline with a Dataset PipelineParameter\n",
+ "\n",
+ "Pipelines can be submitted with default values of PipelineParameters by not specifying any parameters."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Pipeline will run with default file_ds and tabular_ds\n",
+ "pipeline_run = experiment.submit(pipeline)\n",
+ "print(\"Pipeline is submitted for execution\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "RunDetails(pipeline_run).show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pipeline_run.wait_for_completion()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Submit a Pipeline with a different Dataset PipelineParameter value from the SDK\n",
+ "\n",
+ "The training pipeline can be reused with different input datasets by passing them in as PipelineParameters"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "iris_file_ds = Dataset.File.from_files('https://raw.githubusercontent.com/Azure/MachineLearningNotebooks/'\n",
+ " '4e7b3784d50e81c313c62bcdf9a330194153d9cd/how-to-use-azureml/work-with-data/'\n",
+ " 'datasets-tutorial/train-with-datasets/train-dataset/iris.csv')\n",
+ "\n",
+ "iris_tabular_ds = Dataset.Tabular.from_delimited_files('https://raw.githubusercontent.com/Azure/MachineLearningNotebooks/'\n",
+ " '4e7b3784d50e81c313c62bcdf9a330194153d9cd/how-to-use-azureml/work-with-data/'\n",
+ " 'datasets-tutorial/train-with-datasets/train-dataset/iris.csv')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pipeline_run_with_params = experiment.submit(pipeline, pipeline_parameters={'file_ds_param': iris_file_ds, 'tabular_ds_param': iris_tabular_ds}) "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "RunDetails(pipeline_run_with_params).show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pipeline_run_with_params.wait_for_completion()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Dynamically Set the Dataset PipelineParameter Values using a REST Call\n",
+ "\n",
+ "Let's publish the pipeline we created previously, so we can generate a pipeline endpoint. We can then submit the iris datasets to the pipeline REST endpoint by passing in their IDs. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "published_pipeline = pipeline.publish(name=\"Dataset_Pipeline\", description=\"Pipeline to test Dataset PipelineParameter\", continue_on_step_failure=True)\n",
+ "published_pipeline"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "published_pipeline.submit(ws, experiment_name=\"publishedexperiment\", pipeline_parameters={'file_ds_param': iris_file_ds, 'tabular_ds_param': iris_tabular_ds})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from azureml.core.authentication import InteractiveLoginAuthentication\n",
+ "import requests\n",
+ "\n",
+ "auth = InteractiveLoginAuthentication()\n",
+ "aad_token = auth.get_authentication_header()\n",
+ "\n",
+ "rest_endpoint = published_pipeline.endpoint\n",
+ "\n",
+ "print(\"You can perform HTTP POST on URL {} to trigger this pipeline\".format(rest_endpoint))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# specify the param when running the pipeline\n",
+ "response = requests.post(rest_endpoint, \n",
+ " headers=aad_token, \n",
+ " json={\"ExperimentName\": \"MyRestPipeline\",\n",
+ " \"RunSource\": \"SDK\",\n",
+ " \"DataSetDefinitionValueAssignments\": {\"file_ds_param\": {\"SavedDataSetReference\": {\"Id\": iris_file_ds.id}},\n",
+ " \"tabular_ds_param\": {\"SavedDataSetReference\": {\"Id\": iris_tabular_ds.id}}}\n",
+ " }\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "try:\n",
+ " response.raise_for_status()\n",
+ "except Exception: \n",
+ " raise Exception('Received bad response from the endpoint: {}\\n'\n",
+ " 'Response Code: {}\\n'\n",
+ " 'Headers: {}\\n'\n",
+ " 'Content: {}'.format(rest_endpoint, response.status_code, response.headers, response.content))\n",
+ "\n",
+ "run_id = response.json().get('Id')\n",
+ "print('Submitted pipeline run: ', run_id)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "published_pipeline_run_via_rest = PipelineRun(ws.experiments[\"MyRestPipeline\"], run_id)\n",
+ "RunDetails(published_pipeline_run_via_rest).show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "published_pipeline_run_via_rest.wait_for_completion()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ }
+ ],
+ "metadata": {
+ "authors": [
+ {
+ "name": "rafarmah"
+ }
+ ],
+ "category": "tutorial",
+ "compute": [
+ "AML Compute"
+ ],
+ "datasets": [
+ "Custom"
+ ],
+ "deployment": [
+ "None"
+ ],
+ "exclude_from_index": false,
+ "framework": [
+ "Azure ML"
+ ],
+ "friendly_name": "How to use Dataset as a PipelineParameter",
+ "kernelspec": {
+ "display_name": "Python 3.6",
+ "language": "python",
+ "name": "python36"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ },
+ "order_index": 13,
+ "star_tag": [
+ "featured"
+ ],
+ "tags": [
+ "None"
+ ],
+ "task": "Demonstrates the use of Dataset as a PipelineParameter"
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
\ No newline at end of file
diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.yml b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.yml
new file mode 100644
index 000000000..0c5c948ce
--- /dev/null
+++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.yml
@@ -0,0 +1,5 @@
+name: aml-pipelines-showcasing-dataset-and-pipelineparameter
+dependencies:
+- pip:
+ - azureml-sdk
+ - azureml-widgets
diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb
index 684ab67dc..affaa2138 100644
--- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb
+++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb
@@ -510,7 +510,7 @@
" inputs=[step_1_input],\n",
" num_workers=1,\n",
" python_script_path=python_script_path,\n",
- " python_script_params={'arg1', pipeline_param, 'arg2},\n",
+ " python_script_params={'arg1', pipeline_param, 'arg2'},\n",
" run_name='DB_Python_demo',\n",
" compute_target=databricks_compute,\n",
" allow_reuse=True\n",
diff --git a/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb b/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb
index 89462a87b..c9362a7c9 100644
--- a/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb
+++ b/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb
@@ -279,8 +279,7 @@
"# Specify CondaDependencies obj, add necessary packages\n",
"aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(\n",
" conda_packages=['pandas','scikit-learn'], \n",
- " pip_packages=['azureml-sdk[automl,explain]', 'pyarrow'], \n",
- " pin_sdk_version=False)\n",
+ " pip_packages=['azureml-sdk[automl,explain]', 'pyarrow'])\n",
"\n",
"print (\"Run configuration created.\")"
]
@@ -692,7 +691,6 @@
" debug_log = 'automated_ml_errors.log',\n",
" path = train_model_folder,\n",
" compute_target = aml_compute,\n",
- " run_configuration = aml_run_config,\n",
" featurization = 'auto',\n",
" training_data = training_dataset,\n",
" label_column_name = 'cost',\n",
diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/README.md b/how-to-use-azureml/machine-learning-pipelines/parallel-run/README.md
index b7274ae9f..d795a2623 100644
--- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/README.md
+++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/README.md
@@ -2,18 +2,16 @@
Azure Machine Learning Batch Inference targets large inference jobs that are not time-sensitive. Batch Inference provides cost-effective inference compute scaling, with unparalleled throughput for asynchronous applications. It is optimized for high-throughput, fire-and-forget inference over large collections of data.
-# Getting Started with Batch Inference Public Preview
+# Getting Started with Batch Inference
-Batch inference public preview offers a platform in which to do large inference or generic parallel map-style operations. Below introduces the major steps to use this new functionality. For a quick try, please follow the prerequisites and simply run the sample notebooks provided in this directory.
+Batch inference offers a platform in which to do large inference or generic parallel map-style operations. Below introduces the major steps to use this new functionality. For a quick try, please follow the prerequisites and simply run the sample notebooks provided in this directory.
## Prerequisites
### Python package installation
-Following the convention of most AzureML Public Preview features, Batch Inference SDK is currently available as a contrib package.
-
If you're unfamiliar with creating a new Python environment, you may follow this example for [creating a conda environment](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-environment#local). Batch Inference package can be installed through the following pip command.
```
-pip install azureml-contrib-pipeline-steps
+pip install azureml-pipeline-steps
```
### Creation of Azure Machine Learning Workspace
@@ -66,9 +64,8 @@ base_image_registry.password = "password"
## Create a batch inference job
-**ParallelRunStep** is a newly added step in the azureml.contrib.pipeline.steps package. You will use it to add a step to create a batch inference job with your Azure machine learning pipeline. (Use batch inference without an Azure machine learning pipeline is not supported yet). ParallelRunStep has all the following parameters:
+**ParallelRunStep** is a newly added step in the azureml.pipeline.steps package. You will use it to add a step to create a batch inference job with your Azure machine learning pipeline. (Use batch inference without an Azure machine learning pipeline is not supported yet). ParallelRunStep has all the following parameters:
- **name**: this name will be used to register batch inference service, has the following naming restrictions: (unique, 3-32 chars and regex ^\[a-z\]([-a-z0-9]*[a-z0-9])?$)
- - **models**: zero or more model names already registered in Azure Machine Learning model registry.
- **parallel_run_config**: ParallelRunConfig as defined above.
- **inputs**: one or more Dataset objects.
- **output**: this should be a PipelineData object encapsulating an Azure BLOB container path.
diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb
index ef0008d1b..518fa7e0c 100644
--- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb
+++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb
@@ -23,11 +23,6 @@
"\n",
"In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using the ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n",
"\n",
- "> **Note**\n",
- "This notebook uses public preview functionality (ParallelRunStep). Please install azureml-contrib-pipeline-steps package before running this notebook. Pandas is used to display job results.\n",
- "```\n",
- "pip install azureml-contrib-pipeline-steps pandas\n",
- "```\n",
"> **Tip**\n",
"If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n",
"\n",
@@ -86,7 +81,6 @@
"source": [
"import os\n",
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
- "from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# choose a name for your cluster\n",
"compute_name = os.environ.get(\"AML_COMPUTE_CLUSTER_NAME\", \"cpu-cluster\")\n",
@@ -184,9 +178,20 @@
"mnist_ds_name = 'mnist_sample_data'\n",
"\n",
"path_on_datastore = mnist_data.path('mnist')\n",
- "input_mnist_ds = Dataset.File.from_files(path=path_on_datastore, validate=False)\n",
- "registered_mnist_ds = input_mnist_ds.register(ws, mnist_ds_name, create_new_version=True)\n",
- "named_mnist_ds = registered_mnist_ds.as_named_input(mnist_ds_name)"
+ "input_mnist_ds = Dataset.File.from_files(path=path_on_datastore, validate=False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from azureml.data.dataset_consumption_config import DatasetConsumptionConfig\n",
+ "from azureml.pipeline.core import PipelineParameter\n",
+ "\n",
+ "pipeline_param = PipelineParameter(name=\"mnist_param\", default_value=input_mnist_ds)\n",
+ "input_mnist_ds_consumption = DatasetConsumptionConfig(\"minist_param_config\", pipeline_param).as_mount()"
]
},
{
@@ -306,8 +311,6 @@
"metadata": {},
"outputs": [],
"source": [
- "import os\n",
- "\n",
"scripts_folder = \"Code\"\n",
"script_file = \"digit_identification.py\"\n",
"\n",
@@ -341,8 +344,8 @@
"from azureml.core import Environment\n",
"from azureml.core.runconfig import CondaDependencies, DEFAULT_CPU_IMAGE\n",
"\n",
- "batch_conda_deps = CondaDependencies.create(pip_packages=[\"tensorflow==1.15.2\", \"pillow\"])\n",
- "\n",
+ "batch_conda_deps = CondaDependencies.create(pip_packages=[\"tensorflow==1.15.2\", \"pillow\", \n",
+ " \"azureml-core\", \"azureml-dataprep[fuse]\"])\n",
"batch_env = Environment(name=\"batch_environment\")\n",
"batch_env.python.conda_dependencies = batch_conda_deps\n",
"batch_env.docker.enabled = True\n",
@@ -362,17 +365,21 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.contrib.pipeline.steps import ParallelRunStep, ParallelRunConfig\n",
+ "from azureml.pipeline.core import PipelineParameter\n",
+ "from azureml.pipeline.steps import ParallelRunStep, ParallelRunConfig\n",
"\n",
"parallel_run_config = ParallelRunConfig(\n",
" source_directory=scripts_folder,\n",
" entry_script=script_file,\n",
- " mini_batch_size=\"5\",\n",
+ " mini_batch_size=PipelineParameter(name=\"batch_size_param\", default_value=\"5\"),\n",
" error_threshold=10,\n",
" output_action=\"append_row\",\n",
+ " append_row_file_name=\"mnist_outputs.txt\",\n",
" environment=batch_env,\n",
" compute_target=compute_target,\n",
- " node_count=2)"
+ " process_count_per_node=PipelineParameter(name=\"process_count_param\", default_value=2),\n",
+ " node_count=2\n",
+ ")"
]
},
{
@@ -392,10 +399,8 @@
"parallelrun_step = ParallelRunStep(\n",
" name=\"predict-digits-mnist\",\n",
" parallel_run_config=parallel_run_config,\n",
- " inputs=[ named_mnist_ds ],\n",
+ " inputs=[ input_mnist_ds_consumption ],\n",
" output=output_dir,\n",
- " models=[ model ],\n",
- " arguments=[ ],\n",
" allow_reuse=True\n",
")"
]
@@ -454,6 +459,47 @@
"pipeline_run.wait_for_completion(show_output=True)"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Resubmit a with different dataset\n",
+ "Since we made the input a `PipelineParameter`, we can resubmit with a different dataset without having to create an entirely new experiment. We'll use the same datastore but use only a single image."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "path_on_datastore = mnist_data.path('mnist/0.png')\n",
+ "single_image_ds = Dataset.File.from_files(path=path_on_datastore, validate=False)\n",
+ "single_image_ds._ensure_saved(ws)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pipeline_run_2 = experiment.submit(pipeline, \n",
+ " pipeline_parameters={\"mnist_param\": single_image_ds, \n",
+ " \"batch_size_param\": \"1\",\n",
+ " \"process_count_param\": 1}\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pipeline_run_2.wait_for_completion(show_output=True)"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -480,7 +526,7 @@
"\n",
"for root, dirs, files in os.walk(\"mnist_results\"):\n",
" for file in files:\n",
- " if file.endswith('parallel_run_step.txt'):\n",
+ " if file.endswith('mnist_outputs.txt'):\n",
" result_file = os.path.join(root,file)\n",
"\n",
"df = pd.read_csv(result_file, delimiter=\":\", header=None)\n",
diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.yml b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.yml
index cd4be0864..5ddece97f 100644
--- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.yml
+++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.yml
@@ -2,6 +2,6 @@ name: file-dataset-image-inference-mnist
dependencies:
- pip:
- azureml-sdk
- - azureml-contrib-pipeline-steps
+ - azureml-pipeline-steps
- azureml-widgets
- pandas
diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb
index 5aae38616..4edcef6c1 100644
--- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb
+++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb
@@ -23,11 +23,6 @@
"\n",
"In this notebook, we will demonstrate how to make predictions on large quantities of data asynchronously using the ML pipelines with Azure Machine Learning. Batch inference (or batch scoring) provides cost-effective inference, with unparalleled throughput for asynchronous applications. Batch prediction pipelines can scale to perform inference on terabytes of production data. Batch prediction is optimized for high throughput, fire-and-forget predictions for a large collection of data.\n",
"\n",
- "> **Note**\n",
- "This notebook uses public preview functionality (ParallelRunStep). Please install azureml-contrib-pipeline-steps package before running this notebook. Pandas is used to display job results.\n",
- "```\n",
- "pip install azureml-contrib-pipeline-steps pandas\n",
- "```\n",
"> **Tip**\n",
"If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction.\n",
"\n",
@@ -84,7 +79,6 @@
"source": [
"import os\n",
"from azureml.core.compute import AmlCompute, ComputeTarget\n",
- "from azureml.core.compute_target import ComputeTargetException\n",
"\n",
"# choose a name for your cluster\n",
"compute_name = os.environ.get(\"AML_COMPUTE_CLUSTER_NAME\", \"cpu-cluster\")\n",
@@ -304,7 +298,8 @@
"from azureml.core import Environment\n",
"from azureml.core.runconfig import CondaDependencies\n",
"\n",
- "predict_conda_deps = CondaDependencies.create(pip_packages=[ \"scikit-learn==0.20.3\" ])\n",
+ "predict_conda_deps = CondaDependencies.create(pip_packages=[\"scikit-learn==0.20.3\",\n",
+ " \"azureml-core\", \"azureml-dataprep[pandas,fuse]\"])\n",
"\n",
"predict_env = Environment(name=\"predict_environment\")\n",
"predict_env.python.conda_dependencies = predict_conda_deps\n",
@@ -325,19 +320,21 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.contrib.pipeline.steps import ParallelRunStep, ParallelRunConfig\n",
+ "from azureml.pipeline.steps import ParallelRunStep, ParallelRunConfig\n",
"\n",
"# In a real-world scenario, you'll want to shape your process per node and nodes to fit your problem domain.\n",
"parallel_run_config = ParallelRunConfig(\n",
- " source_directory=scripts_folder,\n",
- " entry_script=script_file, # the user script to run against each input\n",
- " mini_batch_size='5MB',\n",
- " error_threshold=5,\n",
- " output_action='append_row',\n",
- " environment=predict_env,\n",
- " compute_target=compute_target, \n",
- " node_count=3,\n",
- " run_invocation_timeout=600)"
+ " source_directory=scripts_folder,\n",
+ " entry_script=script_file, # the user script to run against each input\n",
+ " mini_batch_size='5MB',\n",
+ " error_threshold=5,\n",
+ " output_action='append_row',\n",
+ " append_row_file_name=\"iris_outputs.txt\",\n",
+ " environment=predict_env,\n",
+ " compute_target=compute_target, \n",
+ " node_count=3,\n",
+ " run_invocation_timeout=600\n",
+ ")"
]
},
{
@@ -359,7 +356,6 @@
" inputs=[named_iris_ds],\n",
" output=output_folder,\n",
" parallel_run_config=parallel_run_config,\n",
- " models=[model],\n",
" arguments=['--model_name', 'iris'],\n",
" allow_reuse=True\n",
")"
@@ -453,7 +449,7 @@
"\n",
"for root, dirs, files in os.walk(\"iris_results\"):\n",
" for file in files:\n",
- " if file.endswith('parallel_run_step.txt'):\n",
+ " if file.endswith('iris_outputs.txt'):\n",
" result_file = os.path.join(root,file)\n",
"\n",
"# cleanup output format\n",
diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.yml b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.yml
index 6d1c08a8b..9bdf3735a 100644
--- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.yml
+++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.yml
@@ -2,6 +2,6 @@ name: tabular-dataset-inference-iris
dependencies:
- pip:
- azureml-sdk
- - azureml-contrib-pipeline-steps
+ - azureml-pipeline-steps
- azureml-widgets
- pandas
diff --git a/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.ipynb b/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.ipynb
index 0643b8a9d..d713baef4 100644
--- a/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.ipynb
+++ b/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.ipynb
@@ -26,11 +26,8 @@
"2. Run neural style on each image using one of the provided models (from `pytorch` pretrained models for this example).\n",
"3. Stitch the image back into a video.\n",
"\n",
- "> **Note**\n",
- "This notebook uses public preview functionality (ParallelRunStep). Please install azureml-contrib-pipeline-steps package before running this notebook.\n",
- "```\n",
- "pip install azureml-contrib-pipeline-steps\n",
- "```"
+ "> **Tip**\n",
+ "If your system requires low-latency processing (to process a single document or small set of documents quickly), use [real-time scoring](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-consume-web-service) instead of batch prediction."
]
},
{
@@ -356,7 +353,9 @@
"source": [
"from azureml.pipeline.core.graph import PipelineParameter\n",
"# create a parameter for style (one of \"candy\", \"mosaic\") to transfer the images to\n",
- "style_param = PipelineParameter(name=\"style\", default_value=\"mosaic\")"
+ "style_param = PipelineParameter(name=\"style\", default_value=\"mosaic\")\n",
+ "# create a parameter for the number of nodes to use in step no. 2 (style transfer)\n",
+ "nodecount_param = PipelineParameter(name=\"nodecount\", default_value=2)"
]
},
{
@@ -415,6 +414,8 @@
"parallel_cd.add_conda_package(\"pytorch\")\n",
"parallel_cd.add_conda_package(\"torchvision\")\n",
"parallel_cd.add_conda_package(\"pillow<7\") # needed for torchvision==0.4.0\n",
+ "parallel_cd.add_pip_package(\"azureml-core\")\n",
+ "parallel_cd.add_pip_package(\"azureml-dataprep[fuse]\")\n",
"\n",
"styleenvironment = Environment(name=\"styleenvironment\")\n",
"styleenvironment.python.conda_dependencies=parallel_cd\n",
@@ -427,17 +428,20 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.contrib.pipeline.steps import ParallelRunConfig\n",
+ "from azureml.pipeline.core import PipelineParameter\n",
+ "from azureml.pipeline.steps import ParallelRunConfig\n",
"\n",
"parallel_run_config = ParallelRunConfig(\n",
- " environment=styleenvironment,\n",
- " entry_script='transform.py',\n",
- " output_action='summary_only',\n",
- " mini_batch_size=\"1\",\n",
- " error_threshold=1,\n",
- " source_directory=scripts_folder,\n",
- " compute_target=gpu_cluster, \n",
- " node_count=3)"
+ " environment=styleenvironment,\n",
+ " entry_script='transform.py',\n",
+ " output_action='summary_only',\n",
+ " mini_batch_size=\"1\",\n",
+ " error_threshold=1,\n",
+ " source_directory=scripts_folder,\n",
+ " compute_target=gpu_cluster, \n",
+ " node_count=nodecount_param,\n",
+ " process_count_per_node=2\n",
+ ")"
]
},
{
@@ -446,7 +450,7 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.contrib.pipeline.steps import ParallelRunStep\n",
+ "from azureml.pipeline.steps import ParallelRunStep\n",
"from datetime import datetime\n",
"\n",
"parallel_step_name = 'styletransfer-' + datetime.now().strftime('%Y%m%d%H%M')\n",
@@ -455,9 +459,6 @@
" name=parallel_step_name,\n",
" inputs=[ffmpeg_images_file_dataset], # Input file share/blob container/file dataset\n",
" output=processed_images, # Output file share/blob container\n",
- " models=[mosaic_model, candy_model],\n",
- " tags = {'scenario': \"batch inference\", 'type': \"demo\"},\n",
- " properties = {'area': \"style transfer\"},\n",
" arguments=[\"--style\", style_param],\n",
" parallel_run_config=parallel_run_config,\n",
" allow_reuse=True #[optional - default value True]\n",
@@ -666,7 +667,8 @@
"response = requests.post(rest_endpoint, \n",
" headers=aad_token,\n",
" json={\"ExperimentName\": experiment_name,\n",
- " \"ParameterAssignments\": {\"style\": \"candy\", \"aml_node_count\": 2}})\n",
+ " \"ParameterAssignments\": {\"style\": \"candy\", \"NodeCount\": 3}})\n",
+ "\n",
"run_id = response.json()[\"Id\"]\n",
"\n",
"from azureml.pipeline.core.run import PipelineRun\n",
diff --git a/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.yml b/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.yml
index 8ab21c226..77330241e 100644
--- a/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.yml
+++ b/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.yml
@@ -2,7 +2,6 @@ name: pipeline-style-transfer
dependencies:
- pip:
- azureml-sdk
- - azureml-contrib-pipeline-steps
- azureml-pipeline-steps
- azureml-widgets
- requests
diff --git a/how-to-use-azureml/reinforcement-learning/README.md b/how-to-use-azureml/reinforcement-learning/README.md
index 209701e44..2c7ce77a6 100644
--- a/how-to-use-azureml/reinforcement-learning/README.md
+++ b/how-to-use-azureml/reinforcement-learning/README.md
@@ -22,7 +22,7 @@ Using these samples, you will be able to do the following.
|-------------------|--------------------------------------------|
| [devenv_setup.ipynb](setup/devenv_setup.ipynb) | Notebook to setup development environment for Azure ML RL |
| [cartpole_ci.ipynb](cartpole-on-compute-instance/cartpole_ci.ipynb) | Notebook to train a Cartpole playing agent on an Azure ML Compute Instance |
-| [cartpole_cc.ipynb](cartpole-on-single-compute/cartpole_cc.ipynb) | Notebook to train a Cartpole playing agent on an Azure ML Compute Cluster (single node) |
+| [cartpole_sc.ipynb](cartpole-on-single-compute/cartpole_sc.ipynb) | Notebook to train a Cartpole playing agent on an Azure ML Compute Cluster (single node) |
| [pong_rllib.ipynb](atari-on-distributed-compute/pong_rllib.ipynb) | Notebook to train Pong agent using RLlib on multiple compute targets |
| [minecraft.ipynb](minecraft-on-distributed-compute/minecraft.ipynb) | Notebook to train an agent to navigate through a lava maze in the Minecraft game |
diff --git a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/files/pong_rllib.py b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/files/pong_rllib.py
index c78a19c6c..7735dddac 100644
--- a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/files/pong_rllib.py
+++ b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/files/pong_rllib.py
@@ -23,17 +23,18 @@
ray.init(address=args.ray_address)
- tune.run(run_or_experiment=args.run,
- config={
- "env": args.env,
- "num_gpus": args.config["num_gpus"],
- "num_workers": args.config["num_workers"],
- "callbacks": {"on_train_result": callbacks.on_train_result},
- "sample_batch_size": 50,
- "train_batch_size": 1000,
- "num_sgd_iter": 2,
- "num_data_loader_buffers": 2,
- "model": {"dim": 42},
- },
- stop=args.stop,
- local_dir='./logs')
+ tune.run(
+ run_or_experiment=args.run,
+ config={
+ "env": args.env,
+ "num_gpus": args.config["num_gpus"],
+ "num_workers": args.config["num_workers"],
+ "callbacks": {"on_train_result": callbacks.on_train_result},
+ "sample_batch_size": 50,
+ "train_batch_size": 1000,
+ "num_sgd_iter": 2,
+ "num_data_loader_buffers": 2,
+ "model": {"dim": 42},
+ },
+ stop=args.stop,
+ local_dir='./logs')
diff --git a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb
index 0979479bd..7dd280d81 100644
--- a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb
+++ b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb
@@ -20,8 +20,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Azure ML Reinforcement Learning Sample - Pong problem\n",
- "Azure ML Reinforcement Learning (Azure ML RL) is a managed service for running distributed RL (reinforcement learning) simulation and training using the Ray framework.\n",
+ "# Reinforcement Learning in Azure Machine Learning - Pong problem\n",
+ "Reinforcement Learning in Azure Machine Learning is a managed service for running distributed reinforcement learning training and simulation using the open source Ray framework.\n",
"This example uses Ray RLlib to train a Pong playing agent on a multi-node cluster.\n",
"\n",
"## Pong problem\n",
@@ -48,7 +48,7 @@
"source": [
"The goal here is to train an agent to win an episode of Pong game against opponent with the score of at least 18 points. An episode in Pong runs until one of the players reaches a score of 21. Episodes are a terminology that is used across all the [OpenAI gym](https://gym.openai.com/envs/Pong-v0/) environments that contains a strictly defined task.\n",
"\n",
- "Training a Pong agent is a CPU intensive task and this example demonstrates the use of Azure ML RL service to train an agent faster in a distributed, parallel environment. You'll learn more about using the head and the worker compute targets to train an agent in this notebook below."
+ "Training a Pong agent is a compute-intensive task and this example demonstrates the use of Reinforcement Learning in Azure Machine Learning service to train an agent faster in a distributed, parallel environment. You'll learn more about using the head and the worker compute targets to train an agent in this notebook below."
]
},
{
@@ -57,7 +57,7 @@
"source": [
"## Prerequisite\n",
"\n",
- "The user should have completed the [Azure ML Reinforcement Learning Sample - Setting Up Development Environment](../setup/devenv_setup.ipynb) to setup a virtual network. This virtual network will be used here for head and worker compute targets. It is highly recommended that the user should go through the [Azure ML Reinforcement Learning Sample - Cartpole Problem](../cartpole-on-single-compute/cartpole_cc.ipynb) to understand the basics of Azure ML RL and Ray RLlib used in this notebook."
+ "The user should have completed the [Reinforcement Learning in Azure Machine Learning - Setting Up Development Environment](../setup/devenv_setup.ipynb) to setup a virtual network. This virtual network will be used here for head and worker compute targets. It is highly recommended that the user should go through the [Reinforcement Learning in Azure Machine Learning - Cartpole Problem on Single Compute](../cartpole-on-single-compute/cartpole_sc.ipynb) to understand the basics of Reinforcement Learning in Azure Machine Learning and Ray RLlib used in this notebook."
]
},
{
@@ -69,7 +69,7 @@
"\n",
"* Connecting to a workspace to enable communication between your local machine and remote resources\n",
"* Creating an experiment to track all your runs\n",
- "* Creating a remote head and worker compute target on a vnet to use for training"
+ "* Creating remote head and worker compute target on a virtual network to use for training"
]
},
{
@@ -88,19 +88,19 @@
"source": [
"%matplotlib inline\n",
"\n",
- "# Azure ML core imports\n",
+ "# Azure Machine Learning core imports\n",
"import azureml.core\n",
"\n",
"# Check core SDK version number\n",
- "print(\"Azure ML SDK Version: \", azureml.core.VERSION)"
+ "print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Get Azure ML workspace\n",
- "Get a reference to an existing Azure ML workspace."
+ "### Get Azure Machine Learning workspace\n",
+ "Get a reference to an existing Azure Machine Learning workspace."
]
},
{
@@ -119,7 +119,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Create Azure ML experiment\n",
+ "### Create Azure Machine Learning experiment\n",
"Create an experiment to track the runs in your workspace."
]
},
@@ -140,9 +140,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Specify the name of your vnet\n",
+ "### Specify the name of your virtual network\n",
"\n",
- "The resource group you use must contain a vnet. Specify the name of the vnet here created in the [Azure ML Reinforcement Learning Sample - Setting Up Development Environment](../setup/devenv_setup.ipynb)."
+ "The resource group you use must contain a virtual network. Specify the name of the virtual network here created in the [Azure Machine Learning Reinforcement Learning Sample - Setting Up Development Environment](../setup/devenv_setup.ipynb)."
]
},
{
@@ -159,9 +159,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Create head computing cluster\n",
+ "### Create head compute target\n",
"\n",
- "In this example, we show how to set up separate compute clusters for the Ray head and Ray worker nodes. First we define the head cluster with GPU for the Ray head node. One CPU of the head node will be used for the Ray head process and the rest of the CPUs will be used by the Ray worker processes."
+ "In this example, we show how to set up separate compute targets for the Ray head and Ray worker nodes. First we define the head cluster with GPU for the Ray head node. One CPU of the head node will be used for the Ray head process and the rest of the CPUs will be used by the Ray worker processes."
]
},
{
@@ -186,15 +186,17 @@
" if head_compute_target.provisioning_state == 'Succeeded':\n",
" print('found head compute target. just use it', head_compute_name)\n",
" else: \n",
- " raise Exception('found head compute target but it is in state', head_compute_target.provisioning_state)\n",
+ " raise Exception(\n",
+ " 'found head compute target but it is in state', head_compute_target.provisioning_state)\n",
"else:\n",
" print('creating a new head compute target...')\n",
- " provisioning_config = AmlCompute.provisioning_configuration(vm_size=head_vm_size,\n",
- " min_nodes=head_compute_min_nodes, \n",
- " max_nodes=head_compute_max_nodes,\n",
- " vnet_resourcegroup_name=ws.resource_group,\n",
- " vnet_name=vnet_name,\n",
- " subnet_name='default')\n",
+ " provisioning_config = AmlCompute.provisioning_configuration(\n",
+ " vm_size=head_vm_size,\n",
+ " min_nodes=head_compute_min_nodes, \n",
+ " max_nodes=head_compute_max_nodes,\n",
+ " vnet_resourcegroup_name=ws.resource_group,\n",
+ " vnet_name=vnet_name,\n",
+ " subnet_name='default')\n",
"\n",
" # Create the cluster\n",
" head_compute_target = ComputeTarget.create(ws, head_compute_name, provisioning_config)\n",
@@ -203,7 +205,7 @@
" # If no min node count is provided it will use the scale settings for the cluster\n",
" head_compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
" \n",
- " # For a more detailed view of current AmlCompute status, use get_status()\n",
+ " # For a more detailed view of current AmlCompute status, use get_status()\n",
" print(head_compute_target.get_status().serialize())"
]
},
@@ -211,9 +213,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Create worker computing cluster\n",
+ "### Create worker compute target\n",
"\n",
- "Now we create a compute cluster with CPUs for the additional Ray worker nodes. CPUs in these worker nodes are used by Ray worker processes. Each Ray worker node may have multiple Ray worker processes depending on CPUs on the worker node. Ray can distribute multiple worker tasks on each worker node."
+ "Now we create a compute target with CPUs for the additional Ray worker nodes. CPUs in these worker nodes are used by Ray worker processes. Each Ray worker node, depending on the CPUs on the node, may have multiple Ray worker processes. There can be multiple worker tasks on each worker process (core)."
]
},
{
@@ -222,7 +224,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# Choose a name for your Ray worker cluster\n",
+ "# Choose a name for your Ray worker compute target\n",
"worker_compute_name = 'worker-cpu'\n",
"worker_compute_min_nodes = 0 \n",
"worker_compute_max_nodes = 4\n",
@@ -237,24 +239,26 @@
" if worker_compute_target.provisioning_state == 'Succeeded':\n",
" print('found worker compute target. just use it', worker_compute_name)\n",
" else: \n",
- " raise Exception('found worker compute target but it is in state', head_compute_target.provisioning_state)\n",
+ " raise Exception(\n",
+ " 'found worker compute target but it is in state', head_compute_target.provisioning_state)\n",
"else:\n",
" print('creating a new worker compute target...')\n",
- " provisioning_config = AmlCompute.provisioning_configuration(vm_size=worker_vm_size,\n",
- " min_nodes=worker_compute_min_nodes, \n",
- " max_nodes=worker_compute_max_nodes,\n",
- " vnet_resourcegroup_name=ws.resource_group,\n",
- " vnet_name=vnet_name,\n",
- " subnet_name='default')\n",
- "\n",
- " # Create the cluster\n",
+ " provisioning_config = AmlCompute.provisioning_configuration(\n",
+ " vm_size=worker_vm_size,\n",
+ " min_nodes=worker_compute_min_nodes,\n",
+ " max_nodes=worker_compute_max_nodes,\n",
+ " vnet_resourcegroup_name=ws.resource_group,\n",
+ " vnet_name=vnet_name,\n",
+ " subnet_name='default')\n",
+ "\n",
+ " # Create the compute target\n",
" worker_compute_target = ComputeTarget.create(ws, worker_compute_name, provisioning_config)\n",
" \n",
" # Can poll for a minimum number of nodes and for a specific timeout. \n",
" # If no min node count is provided it will use the scale settings for the cluster\n",
" worker_compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n",
" \n",
- " # For a more detailed view of current AmlCompute status, use get_status()\n",
+ " # For a more detailed view of current AmlCompute status, use get_status()\n",
" print(worker_compute_target.get_status().serialize())"
]
},
@@ -262,12 +266,12 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Train Pong Agent Using Azure ML RL\n",
- "To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct RL run configurations for the underlying RL framework. Azure ML RL initially supports the [Ray framework](https://ray.io/) and its highly customizable [RLLib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLLib framework to train a Pong playing agent.\n",
+ "## Train Pong Agent\n",
+ "To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct reinforcement learning run configurations for the underlying reinforcement learning framework. Reinforcement Learning in Azure Machine Learning supports the open source [Ray framework](https://ray.io/) and its highly customizable [RLLib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLLib framework to train a Pong playing agent.\n",
"\n",
"\n",
"### Define worker configuration\n",
- "Define a `WorkerConfiguration` using your worker compute target. We also specify the number of nodes in the worker compute target to be used for training and additional PIP packages to install on those nodes as a part of setup.\n",
+ "Define a `WorkerConfiguration` using your worker compute target. We specify the number of nodes in the worker compute target to be used for training and additional PIP packages to install on those nodes as a part of setup.\n",
"In this case, we define the PIP packages as dependencies for both head and worker nodes. With this setup, the game simulations will run directly on the worker compute nodes."
]
},
@@ -285,7 +289,7 @@
"# Specify the Ray worker configuration\n",
"worker_conf = WorkerConfiguration(\n",
" \n",
- " # Azure ML compute cluster to run Ray workers\n",
+ " # Azure Machine Learning compute target to run Ray workers\n",
" compute_target=worker_compute_target, \n",
" \n",
" # Number of worker nodes\n",
@@ -305,7 +309,7 @@
"source": [
"### Create reinforcement learning estimator\n",
"\n",
- "The `ReinforcementLearningEstimator` is used to submit a job to Azure Machine Learning to start the Ray experiment run. We define the training script parameters here that will be passed to estimator. \n",
+ "The `ReinforcementLearningEstimator` is used to submit a job to Azure Machine Learning to start the Ray experiment run. We define the training script parameters here that will be passed to the estimator. \n",
"\n",
"We specify `episode_reward_mean` to 18 as we want to stop the training as soon as the trained agent reaches an average win margin of at least 18 point over opponent over all episodes in the training epoch.\n",
"Number of Ray worker processes are defined by parameter `num_workers`. We set it to 13 as we have 13 CPUs available in our compute targets. Multiple Ray worker processes parallelizes agent training and helps in achieving our goal faster. \n",
@@ -348,7 +352,7 @@
" \"--stop\": '\\'{\"episode_reward_mean\": 18, \"time_total_s\": 3600}\\'',\n",
"}\n",
"\n",
- "# RL estimator\n",
+ "# Reinforcement learning estimator\n",
"rl_estimator = ReinforcementLearningEstimator(\n",
" \n",
" # Location of source files\n",
@@ -361,7 +365,7 @@
" # Defined above.\n",
" script_params=script_params,\n",
" \n",
- " # The Azure ML compute target set up for Ray head nodes\n",
+ " # The Azure Machine Learning compute target set up for Ray head nodes\n",
" compute_target=head_compute_target,\n",
" \n",
" # Pip packages\n",
@@ -370,7 +374,7 @@
" # GPU usage\n",
" use_gpu=True,\n",
" \n",
- " # RL framework. Currently must be Ray.\n",
+ " # Reinforcement learning framework. Currently must be Ray.\n",
" rl_framework=Ray(),\n",
" \n",
" # Ray worker configuration defined above.\n",
@@ -394,23 +398,24 @@
"metadata": {},
"source": [
"### Training script\n",
- "As recommended in [RLLib](https://ray.readthedocs.io/en/latest/rllib.html) documentations, we use Ray [Tune](https://ray.readthedocs.io/en/latest/tune.html) API to run training algorithm. All the RLLib built-in trainers are compatible with the Tune API. Here we use tune.run() to execute a built-in training algorithm. For convenience, down below you can see part of the entry script where we make this call.\n",
+ "As recommended in [RLlib](https://ray.readthedocs.io/en/latest/rllib.html) documentation, we use Ray [Tune](https://ray.readthedocs.io/en/latest/tune.html) API to run the training algorithm. All the RLlib built-in trainers are compatible with the Tune API. Here we use tune.run() to execute a built-in training algorithm. For convenience, down below you can see part of the entry script where we make this call.\n",
"\n",
"```python\n",
- " tune.run(run_or_experiment=args.run,\n",
- " config={\n",
- " \"env\": args.env,\n",
- " \"num_gpus\": args.config[\"num_gpus\"],\n",
- " \"num_workers\": args.config[\"num_workers\"],\n",
- " \"callbacks\": {\"on_train_result\": callbacks.on_train_result},\n",
- " \"sample_batch_size\": 50,\n",
- " \"train_batch_size\": 1000,\n",
- " \"num_sgd_iter\": 2,\n",
- " \"num_data_loader_buffers\": 2,\n",
- " \"model\": {\"dim\": 42},\n",
- " },\n",
- " stop=args.stop,\n",
- " local_dir='./logs')\n",
+ " tune.run(\n",
+ " run_or_experiment=args.run,\n",
+ " config={\n",
+ " \"env\": args.env,\n",
+ " \"num_gpus\": args.config[\"num_gpus\"],\n",
+ " \"num_workers\": args.config[\"num_workers\"],\n",
+ " \"callbacks\": {\"on_train_result\": callbacks.on_train_result},\n",
+ " \"sample_batch_size\": 50,\n",
+ " \"train_batch_size\": 1000,\n",
+ " \"num_sgd_iter\": 2,\n",
+ " \"num_data_loader_buffers\": 2,\n",
+ " \"model\": {\"dim\": 42},\n",
+ " },\n",
+ " stop=args.stop,\n",
+ " local_dir='./logs')\n",
"```"
]
},
@@ -437,7 +442,7 @@
"source": [
"### Monitor the run\n",
"\n",
- "Azure ML provides a Jupyter widget to show the real-time status of an experiment run. You could use this widget to monitor the status of runs. The widget shows the list of two child runs, one for head compute target run and one for worker compute target run, as well. You can click on the link under Status to see the details of the child run."
+ "Azure Machine Learning provides a Jupyter widget to show the status of an experiment run. You could use this widget to monitor the status of the runs. The widget shows the list of two child runs, one for head compute target run and one for worker compute target run. You can click on the link under **Status** to see the details of the child run. It will also show the metrics being logged."
]
},
{
@@ -455,9 +460,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Wait for the run to complete before proceeding. If you want to stop the run, you may skip this and move to next section below. \n",
+ "### Stop the run\n",
"\n",
- "**Note: the run may take anywhere from 30 minutes to 45 minutes to complete.**"
+ "To stop the run, call `run.cancel()`."
]
},
{
@@ -466,16 +471,18 @@
"metadata": {},
"outputs": [],
"source": [
- "run.wait_for_completion()"
+ "# Uncomment line below to cancel the run\n",
+ "# run.cancel()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Stop the run\n",
+ "### Wait for completion\n",
+ "Wait for the run to complete before proceeding. If you want to stop the run, you may skip this and move to next section below. \n",
"\n",
- "To cancel the run, call run.cancel()."
+ "**Note: The run may take anywhere from 30 minutes to 45 minutes to complete.**"
]
},
{
@@ -484,7 +491,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# run.cancel()"
+ "run.wait_for_completion()"
]
},
{
@@ -539,8 +546,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "We observe that during the training over multiple episodes, the agent learn to win the Pong game against opponent with our target of 18 points in each episode of 21 points.\n",
- "**Congratulations!! You have trained your Pong agent to win a game marvelously.**"
+ "We observe that during the training over multiple episodes, the agent learns to win the Pong game against opponent with our target of 18 points in each episode of 21 points.\n",
+ "**Congratulations!! You have trained your Pong agent to win a game.**"
]
},
{
@@ -570,7 +577,7 @@
"metadata": {},
"source": [
"## Next\n",
- "In this example, you learnt how to solve distributed RL training problems using head and worker compute targets. This is currently the last introductory tutorial for Azure Machine Learning service's Reinforcement Learning offering. We would love to hear your feedback to build the features you need!"
+ "In this example, you learned how to solve distributed reinforcement learning training problems using head and worker compute targets. This was an introductory tutorial on Reinforcement Learning in Azure Machine Learning service offering. We would love to hear your feedback to build the features you need!"
]
}
],
@@ -595,7 +602,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.7.4"
+ "version": "3.6.9"
},
"notice": "Copyright (c) Microsoft Corporation. All rights reserved.\u00e2\u20ac\u00afLicensed under the MIT License.\u00e2\u20ac\u00af "
},
diff --git a/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb b/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb
index 19bc54f2d..24476e042 100644
--- a/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb
+++ b/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb
@@ -20,11 +20,11 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Azure ML Reinforcement Learning Sample - Cartpole Problem on Compute Instance\n",
+ "# Reinforcement Learning in Azure Machine Learning - Cartpole Problem on Compute Instance\n",
"\n",
- "Azure ML Reinforcement Learning (Azure ML RL) is a managed service for running reinforcement learning training and simulation. With Azure MLRL, data scientists can start developing RL systems on one machine, and scale to compute clusters with 100\u00e2\u20ac\u2122s of nodes if needed.\n",
+ "Reinforcement Learning in Azure Machine Learning is a managed service for running reinforcement learning training and simulation. With Reinforcement Learning in Azure Machine Learning, data scientists can start developing reinforcement learning systems on one machine, and scale to compute targets with 100\u00e2\u20ac\u2122s of nodes if needed.\n",
"\n",
- "This example shows how to use Azure ML RL to train a Cartpole playing agent on a compute instance."
+ "This example shows how to use Reinforcement Learning in Azure Machine Learning to train a Cartpole playing agent on a compute instance."
]
},
{
@@ -56,7 +56,7 @@
"metadata": {},
"source": [
"### Prerequisite\n",
- "The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription id, a resource group and a workspace. All datastores and datasets you use should be associated with your workspace."
+ "The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription ID, a resource group, and an Azure Machine Learning workspace. All datastores and datasets you use should be associated with your workspace."
]
},
{
@@ -75,8 +75,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Azure ML SDK \n",
- "Display the Azure ML SDK version."
+ "### Azure Machine Learning SDK \n",
+ "Display the Azure Machine Learning SDK version."
]
},
{
@@ -86,15 +86,15 @@
"outputs": [],
"source": [
"import azureml.core\n",
- "print(\"Azure ML SDK Version: \", azureml.core.VERSION)"
+ "print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Get Azure ML workspace\n",
- "Get a reference to an existing Azure ML workspace."
+ "### Get Azure Machine Learning workspace\n",
+ "Get a reference to an existing Azure Machine Learning workspace."
]
},
{
@@ -163,18 +163,22 @@
"source": [
"# Load current compute instance info\n",
"current_compute_instance = load_nbvm()\n",
- "print(\"Current compute instance:\", current_compute_instance)\n",
"\n",
"# For this demo, let's use the current compute instance as the compute target, if available\n",
"if current_compute_instance:\n",
+ " print(\"Current compute instance:\", current_compute_instance)\n",
" instance_name = current_compute_instance['instance']\n",
"else:\n",
" instance_name = next(iter(ws.compute_targets))\n",
+ " print(\"Instance name:\", instance_name)\n",
"\n",
"compute_target = ws.compute_targets[instance_name]\n",
"\n",
"print(\"Compute target status:\")\n",
- "print(compute_target.get_status().serialize())\n",
+ "try:\n",
+ " print(compute_target.get_status().serialize())\n",
+ "except:\n",
+ " print(compute_target.get_status())\n",
"\n",
"print(\"Compute target size:\")\n",
"print(compute_target.size(ws))"
@@ -184,7 +188,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Create Azure ML experiment\n",
+ "### Create Azure Machine Learning experiment\n",
"Create an experiment to track the runs in your workspace. "
]
},
@@ -204,8 +208,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Train Cartpole Agent Using Azure ML RL\n",
- "To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct RL run configurations for the underlying RL framework. Azure ML RL initially supports the [Ray framework](https://ray.io/) and its highly customizable [RLlib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLlib framework to train a cartpole playing agent. "
+ "## Train Cartpole Agent\n",
+ "To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct reinforcement learning run configurations for the underlying reinforcement learning framework. Reinforcement Learning in Azure Machine Learning supports the open source [Ray framework](https://ray.io/) and its highly customizable [RLlib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLlib framework to train a cartpole playing agent. "
]
},
{
@@ -222,7 +226,7 @@
"- `entry_script`, path to your entry script relative to the source directory,\n",
"- `script_params`, constant parameters to be passed to each run of training script,\n",
"- `compute_target`, reference to the compute target in which the trainer and worker(s) jobs will be executed,\n",
- "- `rl_framework`, the RL framework to be used (currently must be Ray).\n",
+ "- `rl_framework`, the reinforcement learning framework to be used (currently must be Ray).\n",
"\n",
"We use the `script_params` parameter to pass in general and algorithm-specific parameters to the training script.\n"
]
@@ -273,10 +277,10 @@
" # A dictionary of arguments to pass to the training script specified in ``entry_script``\n",
" script_params=script_params,\n",
" \n",
- " # The Azure ML compute target set up for Ray head nodes\n",
+ " # The Azure Machine Learning compute target set up for Ray head nodes\n",
" compute_target=compute_target,\n",
" \n",
- " # RL framework. Currently must be Ray.\n",
+ " # Reinforcement learning framework. Currently must be Ray.\n",
" rl_framework=Ray()\n",
")"
]
@@ -345,11 +349,11 @@
"metadata": {},
"source": [
"### Monitor experiment\n",
- "Azure ML provides a Jupyter widget to show the real-time status of an experiment run. You could use this widget to monitor status of the runs.\n",
+ "Azure Machine Learning provides a Jupyter widget to show the status of an experiment run. You could use this widget to monitor the status of the runs.\n",
"\n",
"Note that _ReinforcementLearningEstimator_ creates at least two runs: (a) A parent run, i.e. the run returned above, and (b) a collection of child runs. The number of the child runs depends on the configuration of the reinforcement learning estimator. In our simple scenario, configured above, only one child run will be created.\n",
"\n",
- "The widget will show a list of the child runs as well. You can click on the link under **Status** to see the details of a child run."
+ "The widget will show a list of the child runs as well. You can click on the link under **Status** to see the details of a child run. It will also show the metrics being logged."
]
},
{
@@ -369,7 +373,7 @@
"source": [
"### Stop the run\n",
"\n",
- "To cancel the run, call `training_run.cancel()`."
+ "To stop the run, call `training_run.cancel()`."
]
},
{
@@ -577,10 +581,10 @@
" training_artifacts_ds.as_named_input('artifacts_dataset'),\n",
" training_artifacts_ds.as_named_input('artifacts_path').as_mount()],\n",
" \n",
- " # The Azure ML compute target\n",
+ " # The Azure Machine Learning compute target\n",
" compute_target=compute_target,\n",
" \n",
- " # RL framework. Currently must be Ray.\n",
+ " # Reinforcement learning framework. Currently must be Ray.\n",
" rl_framework=Ray(),\n",
" \n",
" # Additional pip packages to install\n",
@@ -662,7 +666,7 @@
"metadata": {},
"source": [
"## Next\n",
- "This example was about running Azure ML RL (Ray/RLlib Framework) on compute instance. Please see [Cartpole problem](../cartpole-on-single-compute/cartpole_cc.ipynb)\n",
+ "This example was about running Reinforcement Learning in Azure Machine Learning (Ray/RLlib Framework) on a compute instance. Please see [Cartpole Problem on Single Compute](../cartpole-on-single-compute/cartpole_sc.ipynb)\n",
"example which uses Ray RLlib to train a Cartpole playing agent on a single node remote compute.\n"
]
}
diff --git a/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.ipynb b/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb
similarity index 91%
rename from how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.ipynb
rename to how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb
index 15fb9aa27..df30d078e 100644
--- a/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.ipynb
+++ b/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb
@@ -13,18 +13,18 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- ""
+ ""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Azure ML Reinforcement Learning Sample - Cartpole Problem\n",
+ "# Reinforcement Learning in Azure Machine Learning - Cartpole Problem on Single Compute\n",
"\n",
- "Azure ML Reinforcement Learning (Azure ML RL) is a managed service for running reinforcement learning training and simulation. With Azure MLRL, data scientists can start developing RL systems on one machine, and scale to compute clusters with 100\u00e2\u20ac\u2122s of nodes if needed.\n",
+ "Reinforcement Learning in Azure Machine Learning is a managed service for running reinforcement learning training and simulation. With Reinforcement Learning in Azure Machine Learning, data scientists can start developing reinforcement learning systems on one machine, and scale to compute targets with 100\u00e2\u20ac\u2122s of nodes if needed.\n",
"\n",
- "This example shows how to use Azure ML RL to train a Cartpole playing agent on a single machine. "
+ "This example shows how to use Reinforcement Learning in Azure Machine Learning to train a Cartpole playing agent on a single compute. "
]
},
{
@@ -56,7 +56,7 @@
"metadata": {},
"source": [
"### Prerequisite\n",
- "The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription id, a resource group and a workspace. All datastores and datasets you use should be associated with your workspace."
+ "The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription ID, a resource group, and an Azure Machine Learning workspace. All datastores and datasets you use should be associated with your workspace."
]
},
{
@@ -75,8 +75,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Azure ML SDK \n",
- "Display the Azure ML SDK version."
+ "### Azure Machine Learning SDK \n",
+ "Display the Azure Machine Learning SDK version."
]
},
{
@@ -87,15 +87,15 @@
"source": [
"import azureml.core\n",
"\n",
- "print(\"Azure ML SDK Version: \", azureml.core.VERSION)"
+ "print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Get Azure ML workspace\n",
- "Get a reference to an existing Azure ML workspace."
+ "### Get Azure Machine Learning workspace\n",
+ "Get a reference to an existing Azure Machine Learning workspace."
]
},
{
@@ -118,7 +118,7 @@
"\n",
"A compute target is a designated compute resource where you run your training and simulation scripts. This location may be your local machine or a cloud-based compute resource. The code below shows how to create a cloud-based compute target. For more information see [What are compute targets in Azure Machine Learning?](https://docs.microsoft.com/en-us/azure/machine-learning/concept-compute-target)\n",
"\n",
- "**Note: Creation of a compute resource can take several minutes**"
+ "**Note: Creation of a compute resource can take several minutes**. Please make sure to change `STANDARD_D2_V2` to a [size available in your region](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines)."
]
},
{
@@ -158,7 +158,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Create Azure ML experiment\n",
+ "### Create Azure Machine Learning experiment\n",
"Create an experiment to track the runs in your workspace. "
]
},
@@ -178,8 +178,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Train Cartpole Agent Using Azure ML RL\n",
- "To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct RL run configurations for the underlying RL framework. Azure ML RL initially supports the [Ray framework](https://ray.io/) and its highly customizable [RLlib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLlib framework to train a cartpole playing agent. "
+ "## Train Cartpole Agent\n",
+ "To facilitate reinforcement learning, Azure Machine Learning Python SDK provides a high level abstraction, the _ReinforcementLearningEstimator_ class, which allows users to easily construct reinforcement learning run configurations for the underlying reinforcement learning framework. Reinforcement Learning in Azure Machine Learning supports the open source [Ray framework](https://ray.io/) and its highly customizable [RLlib](https://ray.readthedocs.io/en/latest/rllib.html#rllib-scalable-reinforcement-learning). In this section we show how to use _ReinforcementLearningEstimator_ and Ray/RLlib framework to train a cartpole playing agent. "
]
},
{
@@ -196,7 +196,7 @@
"- `entry_script`, path to your entry script relative to the source directory,\n",
"- `script_params`, constant parameters to be passed to each run of training script,\n",
"- `compute_target`, reference to the compute target in which the trainer and worker(s) jobs will be executed,\n",
- "- `rl_framework`, the RL framework to be used (currently must be Ray).\n",
+ "- `rl_framework`, the reinforcement learning framework to be used (currently must be Ray).\n",
"\n",
"We use the `script_params` parameter to pass in general and algorithm-specific parameters to the training script.\n"
]
@@ -249,7 +249,7 @@
" # There are two parts to this:\n",
" # 1. Use a custom docker file with proper instructions to install xvfb, ffmpeg, python-opengl\n",
" # and other dependencies. \n",
- " # TODO: Add these instructions to default rl base image and drop this docker file.\n",
+ " # TODO: Add these instructions to default reinforcement learning base image and drop this docker file.\n",
" \n",
" with open(\"files/docker/Dockerfile\", \"r\") as f:\n",
" dockerfile=f.read()\n",
@@ -274,10 +274,10 @@
" # A dictionary of arguments to pass to the training script specified in ``entry_script``\n",
" script_params=script_params,\n",
" \n",
- " # The Azure ML compute target set up for Ray head nodes\n",
+ " # The Azure Machine Learning compute target set up for Ray head nodes\n",
" compute_target=compute_target,\n",
" \n",
- " # RL framework. Currently must be Ray.\n",
+ " # Reinforcement learning framework. Currently must be Ray.\n",
" rl_framework=Ray(),\n",
" \n",
" # Custom environmnet for Xvfb\n",
@@ -350,11 +350,11 @@
"source": [
"### Monitor experiment\n",
"\n",
- "Azure ML provides a Jupyter widget to show the real-time status of an experiment run. You could use this widget to monitor status of the runs.\n",
+ "Azure Machine Learning provides a Jupyter widget to show the status of an experiment run. You could use this widget to monitor the status of the runs.\n",
"\n",
"Note that _ReinforcementLearningEstimator_ creates at least two runs: (a) A parent run, i.e. the run returned above, and (b) a collection of child runs. The number of the child runs depends on the configuration of the reinforcement learning estimator. In our simple scenario, configured above, only one child run will be created.\n",
"\n",
- "The widget will show a list of the child runs as well. You can click on the link under **Status** to see the details of a child run."
+ "The widget will show a list of the child runs as well. You can click on the link under **Status** to see the details of a child run. It will also show the metrics being logged."
]
},
{
@@ -373,7 +373,7 @@
"metadata": {},
"source": [
"### Stop the run\n",
- "To cancel the run, call `training_run.cancel()`."
+ "To stop the run, call `training_run.cancel()`."
]
},
{
@@ -393,7 +393,7 @@
"### Wait for completion\n",
"Wait for the run to complete before proceeding.\n",
"\n",
- "**Note: The length of the run depends on the provisioning time of the compute target and may take several minutes to complete.**"
+ "**Note: The length of the run depends on the provisioning time of the compute target and it may take several minutes to complete.**"
]
},
{
@@ -560,18 +560,20 @@
" dir_util.mkpath(destination)\n",
" \n",
" try:\n",
- " # Mount dataset and copy movies\n",
+ "        print(\"Trying mounting dataset and copying movies.\")\n",
" # Note: We assume movie paths start with '\\'\n",
" mount_context = artifacts_ds.mount()\n",
" mount_context.start()\n",
- " print('Download started.')\n",
" for movie in movies:\n",
" print('Copying {} ...'.format(movie))\n",
" shutil.copy2(path.join(mount_context.mount_point, movie[1:]), destination)\n",
" mount_context.stop()\n",
" except:\n",
- " print(\"Mounting error! Downloading all artifacts ...\")\n",
- " artifacts_ds.download(target_path=destination, overwrite=True)\n",
+ " print(\"Mounting failed! Going with dataset download.\")\n",
+ " for i, file in enumerate(artifacts_ds.to_path()):\n",
+ " if file in movies:\n",
+ " print('Downloading {} ...'.format(file))\n",
+ " artifacts_ds.skip(i).take(1).download(target_path=destination, overwrite=True)\n",
" \n",
" print('Downloading movies completed!')\n",
"\n",
@@ -625,7 +627,7 @@
"print(\"Last movie:\", last_movie)\n",
"\n",
"# Download movies\n",
- "training_movies_path = \"training\"\n",
+ "training_movies_path = path.join(\"training\", \"videos\")\n",
"download_movies(training_artifacts_ds, [first_movie, last_movie], training_movies_path)"
]
},
@@ -781,7 +783,7 @@
"# 1. Use a custom docker file with proper instructions to install xvfb, ffmpeg, python-opengl\n",
"# and other dependencies.\n",
"# Note: Even when the rendering is off pyhton-opengl is needed.\n",
- "# TODO: Add these instructions to default rl base image and drop this docker file.\n",
+ "# TODO: Add these instructions to default reinforcement learning base image and drop this docker file.\n",
"\n",
"with open(\"files/docker/Dockerfile\", \"r\") as f:\n",
" dockerfile=f.read()\n",
@@ -811,10 +813,10 @@
" training_artifacts_ds.as_named_input('artifacts_dataset'),\n",
" training_artifacts_ds.as_named_input('artifacts_path').as_mount()],\n",
" \n",
- " # The Azure ML compute target set up for Ray head nodes\n",
+ " # The Azure Machine Learning compute target set up for Ray head nodes\n",
" compute_target=compute_target,\n",
" \n",
- " # RL framework. Currently must be Ray.\n",
+ " # Reinforcement learning framework. Currently must be Ray.\n",
" rl_framework=Ray(),\n",
" \n",
" # Custom environmnet for Xvfb\n",
@@ -928,7 +930,7 @@
"print(\"Last movie:\", last_movie)\n",
"\n",
"# Download last movie\n",
- "rollout_movies_path = \"rollout\"\n",
+ "rollout_movies_path = path.join(\"rollout\", \"videos\")\n",
"download_movies(rollout_artifacts_ds, [last_movie], rollout_movies_path)\n",
"\n",
"# Look for the downloaded movie in local directory\n",
@@ -996,7 +998,7 @@
"metadata": {},
"source": [
"## Next\n",
- "This example was about running Azure ML RL (Ray/RLlib Framework) on a single node. Please see [Pong problem](../atari-on-distributed-compute/pong_rllib.ipynb)\n",
+ "This example was about running Reinforcement Learning in Azure Machine Learning (Ray/RLlib Framework) on a single compute. Please see [Pong Problem](../atari-on-distributed-compute/pong_rllib.ipynb)\n",
"example which uses Ray RLlib to train a Pong playing agent on a multi-node cluster."
]
}
diff --git a/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.yml b/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.yml
similarity index 84%
rename from how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.yml
rename to how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.yml
index 0ac02b815..48d5edfa8 100644
--- a/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.yml
+++ b/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.yml
@@ -1,4 +1,4 @@
-name: cartpole_cc
+name: cartpole_sc
dependencies:
- pip:
- azureml-sdk
diff --git a/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/files/minecraft_train.py b/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/files/minecraft_train.py
index a3e045293..d97411d61 100644
--- a/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/files/minecraft_train.py
+++ b/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/files/minecraft_train.py
@@ -1,3 +1,5 @@
+import os
+
import ray
import ray.tune as tune
@@ -6,8 +8,10 @@
def stop(trial_id, result):
+ max_train_time = int(os.environ.get("AML_MAX_TRAIN_TIME_SECONDS", 5 * 60 * 60))
+
return result["episode_reward_mean"] >= 1 \
- or result["time_total_s"] > 5 * 60 * 60
+ or result["time_total_s"] >= max_train_time
if __name__ == '__main__':
diff --git a/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/minecraft.ipynb b/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/minecraft.ipynb
index 98b30ccc2..5352050e5 100644
--- a/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/minecraft.ipynb
+++ b/how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/minecraft.ipynb
@@ -110,7 +110,7 @@
"outputs": [],
"source": [
"import azureml.core\n",
- "print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)"
+ "print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION) "
]
},
{
@@ -297,8 +297,11 @@
"metadata": {},
"outputs": [],
"source": [
+ "import os\n",
"from azureml.core import Environment\n",
"\n",
+ "max_train_time = os.environ.get(\"AML_MAX_TRAIN_TIME_SECONDS\", 5 * 60 * 60)\n",
+ "\n",
"def create_env(env_type):\n",
" env = Environment(name='minecraft-{env_type}'.format(env_type=env_type))\n",
"\n",
@@ -306,6 +309,7 @@
" env.docker.base_image = 'akdmsft/minecraft-{env_type}'.format(env_type=env_type)\n",
"\n",
" env.python.interpreter_path = \"xvfb-run -s '-screen 0 640x480x16 -ac +extension GLX +render' python\"\n",
+ " env.environment_variables[\"AML_MAX_TRAIN_TIME_SECONDS\"] = str(max_train_time)\n",
" env.python.user_managed_dependencies = True\n",
" \n",
" return env\n",
@@ -590,7 +594,6 @@
"outputs": [],
"source": [
"import re\n",
- "import os\n",
"import tempfile\n",
"\n",
"from azureml.core import Dataset\n",
diff --git a/how-to-use-azureml/reinforcement-learning/setup/devenv_setup.ipynb b/how-to-use-azureml/reinforcement-learning/setup/devenv_setup.ipynb
index 78a5d4cb8..35e5dfd4b 100644
--- a/how-to-use-azureml/reinforcement-learning/setup/devenv_setup.ipynb
+++ b/how-to-use-azureml/reinforcement-learning/setup/devenv_setup.ipynb
@@ -20,7 +20,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Azure ML Reinforcement Learning Sample - Setting Up Development Environment\n",
+ "# Reinforcement Learning in Azure Machine Learning - Setting Up Development Environment\n",
"\n",
"Ray multi-node cluster setup requires all worker nodes to be able to communicate with the head node. This notebook explains you how to setup a virtual network, to be used by the Ray head and worker compute targets, created and used in other notebook examples."
]
@@ -31,7 +31,7 @@
"source": [
"### Prerequisite\n",
"\n",
- "The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription id, a resource group and a workspace."
+ "The user should have completed the Azure Machine Learning Tutorial: [Get started creating your first ML experiment with the Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-1st-experiment-sdk-setup). You will need to make sure that you have a valid subscription ID, a resource group, and an Azure Machine Learning workspace."
]
},
{
@@ -48,19 +48,17 @@
"metadata": {},
"outputs": [],
"source": [
- "# Azure ML Core imports\n",
"import azureml.core\n",
"\n",
- "# Check core SDK version number\n",
- "print(\"Azure ML SDK Version: \", azureml.core.VERSION)"
+ "print(\"Azure Machine Learning SDK Version: \", azureml.core.VERSION)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Get Azure ML workspace\n",
- "Get a reference to an existing Azure ML workspace. Please make sure that the VM sizes `STANDARD_NC6` and `STANDARD_D2_V2` are supported in the workspace's region.\n"
+ "### Get Azure Machine Learning workspace\n",
+ "Get a reference to an existing Azure Machine Learning workspace. Please make sure to change `STANDARD_NC6` and `STANDARD_D2_V2` to [the ones available in your region](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines).\n"
]
},
{
@@ -72,7 +70,7 @@
"from azureml.core import Workspace\n",
"\n",
"ws = Workspace.from_config()\n",
- "print(ws.name, ws.location, ws.resource_group, sep = ' | ') "
+ "print(ws.name, ws.location, ws.resource_group, sep = ' | ')"
]
},
{
@@ -115,7 +113,7 @@
"# The Azure subscription you are using\n",
"subscription_id=ws.subscription_id\n",
"\n",
- "# The resource group for the RL cluster\n",
+ "# The resource group for the reinforcement learning cluster\n",
"resource_group=ws.resource_group\n",
"\n",
"# Azure region of the resource group\n",
@@ -135,7 +133,7 @@
")\n",
"\n",
"async_vnet_creation.wait()\n",
- "print(\"VNet created successfully: \", async_vnet_creation.result())"
+ "print(\"Virtual network created successfully: \", async_vnet_creation.result())"
]
},
{
@@ -169,7 +167,7 @@
" azure.mgmt.network.models.SecurityRule(\n",
" name=security_rule_name,\n",
" access=azure.mgmt.network.models.SecurityRuleAccess.allow,\n",
- " description='Azure ML RL rule',\n",
+ " description='Reinforcement Learning in Azure Machine Learning rule',\n",
" destination_address_prefix='*',\n",
" destination_port_range='29876-29877',\n",
" direction=azure.mgmt.network.models.SecurityRuleDirection.inbound,\n",
@@ -202,7 +200,7 @@
" network_security_group=network_security_group\n",
" )\n",
" \n",
- "# Create subnet on vnet\n",
+ "# Create subnet on virtual network\n",
"async_subnet_creation = network_client.subnets.create_or_update(\n",
" resource_group_name=resource_group,\n",
" virtual_network_name=vnet_name,\n",
diff --git a/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb b/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb
index b2f8ef691..762f9a8e3 100644
--- a/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb
+++ b/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb
@@ -100,7 +100,7 @@
"\n",
"# Check core SDK version number\n",
"\n",
- "print(\"This notebook was created using SDK version 1.5.0, you are currently running version\", azureml.core.VERSION)"
+ "print(\"This notebook was created using SDK version 1.6.0, you are currently running version\", azureml.core.VERSION)"
]
},
{
diff --git a/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.ipynb b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.ipynb
index 83ff13b5d..28749b7e5 100644
--- a/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.ipynb
+++ b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard.ipynb
@@ -439,6 +439,8 @@
"metadata": {},
"outputs": [],
"source": [
+ "from azureml.train.dnn import TensorFlow\n",
+ "\n",
"script_params = {\"--log_dir\": \"./logs\"}\n",
"\n",
"# If you want the run to go longer, set --max-steps to a higher number.\n",
diff --git a/how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb b/how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb
index 3cacab4b9..a6c4ae27d 100644
--- a/how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb
+++ b/how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb
@@ -144,25 +144,18 @@
"import os\n",
"\n",
"try:\n",
- " # if you want to connect using SSH key instead of username/password you can provide parameters private_key_file and private_key_passphrase\n",
- " attach_config = HDInsightCompute.attach_configuration(address=os.environ.get('hdiservername', '-ssh.azurehdinsight.net'), \n",
- " ssh_port=22, \n",
- " username=os.environ.get('hdiusername', ''), \n",
+ "# If you want to connect using SSH key instead of username/password you can provide parameters private_key_file and private_key_passphrase\n",
+ "\n",
+ "# Attaching a HDInsight cluster using the public address of the HDInsight cluster is no longer supported.\n",
+ "# Instead, use resourceId of the HDInsight cluster.\n",
+ "# The resourceId of the HDInsight Cluster can be constructed using the following string format:\n",
+ "# /subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.HDInsight/clusters/<cluster_name>.\n",
+ "# You can also use subscription_id, resource_group and cluster_name without constructing resourceId.\n",
+ " attach_config = HDInsightCompute.attach_configuration(resource_id='<resource_id>',\n",
+ " ssh_port=22,\n",
+ " username=os.environ.get('hdiusername', ''),\n",
" password=os.environ.get('hdipassword', ''))\n",
"\n",
- "# The following Azure regions do not support attaching a HDI Cluster using the public IP address of the HDI Cluster.\n",
- "# Instead, use the Azure Resource Manager ID of the HDI Cluster with the resource_id parameter:\n",
- "# US East\n",
- "# US West 2\n",
- "# US South Central\n",
- "# The resource ID of the HDI Cluster can be constructed using the\n",
- "# subscription ID, resource group name, and cluster name using the following string format:\n",
- "# /subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.HDInsight/clusters/<cluster_name>. \n",
- "# If in US East, US West 2, or US South Central, use the following instead:\n",
- "# attach_config = HDInsightCompute.attach_configuration(resource_id='<resource_id>',\n",
- "# ssh_port=22,\n",
- "# username=os.environ.get('hdiusername', ''),\n",
- "# password=os.environ.get('hdipassword', ''))\n",
" hdi_compute = ComputeTarget.attach(workspace=ws, \n",
" name='myhdi', \n",
" attach_configuration=attach_config)\n",
diff --git a/how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb b/how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb
index 6216378da..ff3d93696 100644
--- a/how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb
+++ b/how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb
@@ -268,23 +268,18 @@
" private_key_file='./.ssh/id_rsa')\n",
"\n",
"\n",
- "# The following Azure regions do not support attaching a virtual machine using the public IP address of the VM.\n",
- "# Instead, use the Azure Resource Manager ID of the VM with the resource_id parameter:\n",
- "# US East\n",
- "# US West 2\n",
- "# US South Central\n",
- "# The resource ID of the VM can be constructed using the\n",
- "# subscription ID, resource group name, and VM name using the following string format:\n",
- "# /subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>. \n",
- "# If in US East, US West 2, or US South Central, use the following instead:\n",
- "# attach_config = RemoteCompute.attach_configuration(resource_id='<resource_id>',\n",
- "#                                                    ssh_port=22,\n",
- "#                                                    username='username',\n",
- "#                                                    private_key_file='./.ssh/id_rsa')\n",
- "\n",
- " attached_dsvm_compute = ComputeTarget.attach(workspace=ws,\n",
- "                                              name=compute_target_name,\n",
- "                                              attach_configuration=attach_config)\n",
+ "# Attaching a virtual machine using the public IP address of the VM is no longer supported.\n",
+ "# Instead, use resourceId of the VM.\n",
+ "# The resourceId of the VM can be constructed using the following string format:\n",
+ "# /subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>.\n",
+ "# You can also use subscription_id, resource_group and vm_name without constructing resourceId.\n",
+ " attach_config = RemoteCompute.attach_configuration(resource_id='<resource_id>',\n",
+ "                                                       ssh_port=22,\n",
+ "                                                       username='username',\n",
+ "                                                       private_key_file='./.ssh/id_rsa')\n",
+ " attached_dsvm_compute = ComputeTarget.attach(workspace=ws,\n",
+ "                                              name=compute_target_name,\n",
+ "                                              attach_configuration=attach_config)\n",
" attached_dsvm_compute.wait_for_completion(show_output=True)"
]
},
diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb b/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb
index 739fca0ea..d7367798a 100644
--- a/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb
+++ b/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb
@@ -279,7 +279,8 @@
" outputs=[prepared_fashion_ds],\n",
" source_directory=script_folder,\n",
" compute_target=compute_target,\n",
- " runconfig=run_config)"
+ " runconfig=run_config,\n",
+ " allow_reuse=False)"
]
},
{
diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.yml b/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.yml
index e6b3df702..f33e94748 100644
--- a/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.yml
+++ b/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.yml
@@ -2,6 +2,5 @@ name: pipeline-for-image-classification
dependencies:
- pip:
- azureml-sdk
- - azureml-dataprep
- pandas<=0.23.4
- fuse
diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.yml b/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.yml
index a3471ade9..af9acab32 100644
--- a/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.yml
+++ b/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.yml
@@ -2,5 +2,4 @@ name: tabular-timeseries-dataset-filtering
dependencies:
- pip:
- azureml-sdk
- - azureml-dataprep
- pandas<=0.23.4
diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.yml b/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.yml
index 4f490f417..d13f92dc9 100644
--- a/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.yml
+++ b/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.yml
@@ -3,7 +3,6 @@ dependencies:
- pip:
- azureml-sdk
- azureml-widgets
- - azureml-dataprep
- pandas<=0.23.4
- fuse
- scikit-learn
diff --git a/index.md b/index.md
index 383e6669f..215a8461c 100644
--- a/index.md
+++ b/index.md
@@ -26,7 +26,7 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
| :star:[Datasets with ML Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb) | Train | Fashion MNIST | Remote | None | Azure ML | Dataset, Pipeline, Estimator, ScriptRun |
| :star:[Filtering data using Tabular Timeseiries Dataset related API](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.ipynb) | Filtering | NOAA | Local | None | Azure ML | Dataset, Tabular Timeseries |
| :star:[Train with Datasets (Tabular and File)](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb) | Train | Iris, Diabetes | Remote | None | Azure ML | Dataset, Estimator, ScriptRun |
-| [Forecasting away from training data](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-high-frequency/auto-ml-forecasting-function.ipynb) | Forecasting | None | Remote | None | Azure ML AutoML | Forecasting, Confidence Intervals |
+| [Forecasting away from training data](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb) | Forecasting | None | Remote | None | Azure ML AutoML | Forecasting, Confidence Intervals |
| [Automated ML run with basic edition features.](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb) | Classification | Bankmarketing | AML | ACI | None | featurization, explainability, remote_run, AutomatedML |
| [Classification of credit card fraudulent transactions using Automated ML](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb) | Classification | Creditcard | AML Compute | None | None | remote_run, AutomatedML |
| [Automated ML run with featurization and model explainability.](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb) | Regression | MachineData | AML | ACI | None | featurization, explainability, remote_run, AutomatedML |
@@ -41,6 +41,7 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
| :star:[How to Setup a Schedule for a Published Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb) | Demonstrates the use of Schedules for Published Pipelines | Custom | AML Compute | None | Azure ML | None |
| [How to setup a versioned Pipeline Endpoint](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-versioned-pipeline-endpoints.ipynb) | Demonstrates the use of PipelineEndpoint to run a specific version of the Published Pipeline | Custom | AML Compute | None | Azure ML | None |
| :star:[How to use DataPath as a PipelineParameter](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb) | Demonstrates the use of DataPath as a PipelineParameter | Custom | AML Compute | None | Azure ML | None |
+| :star:[How to use Dataset as a PipelineParameter](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb) | Demonstrates the use of Dataset as a PipelineParameter | Custom | AML Compute | None | Azure ML | None |
| [How to use AdlaStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-adla-as-compute-target.ipynb) | Demonstrates the use of AdlaStep | Custom | Azure Data Lake Analytics | None | Azure ML | None |
| :star:[How to use DatabricksStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb) | Demonstrates the use of DatabricksStep | Custom | Azure Databricks | None | Azure ML, Azure Databricks | None |
| :star:[How to use AutoMLStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb) | Demonstrates the use of AutoMLStep | Custom | AML Compute | None | Automated Machine Learning | None |
@@ -113,7 +114,6 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
| [onnx-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-model-register-and-deploy.ipynb) | | | | | | |
| [production-deploy-to-aks](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb) | | | | | | |
| [production-deploy-to-aks-gpu](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.ipynb) | | | | | | |
-| [tensorflow-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/tensorflow/tensorflow-model-register-and-deploy.ipynb) | | | | | | |
| [explain-model-on-amlcompute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb) | | | | | | |
| [save-retrieve-explanations-run-history](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.ipynb) | | | | | | |
| [train-explain-model-locally-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.ipynb) | | | | | | |
@@ -123,7 +123,7 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an
| [authentication-in-azureml](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/manage-azureml-service/authentication-in-azureml/authentication-in-azureml.ipynb) | | | | | | |
| [pong_rllib](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb) | | | | | | |
| [cartpole_ci](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb) | | | | | | |
-| [cartpole_cc](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_cc.ipynb) | | | | | | |
+| [cartpole_sc](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb) | | | | | | |
| [minecraft](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/minecraft-on-distributed-compute/minecraft.ipynb) | | | | | | |
| [devenv_setup](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/setup/devenv_setup.ipynb) | | | | | | |
| [Logging APIs](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb) | Logging APIs and analyzing results | None | None | None | None | None |
diff --git a/setup-environment/configuration.ipynb b/setup-environment/configuration.ipynb
index 3f2eb1230..e0e840dac 100644
--- a/setup-environment/configuration.ipynb
+++ b/setup-environment/configuration.ipynb
@@ -102,7 +102,7 @@
"source": [
"import azureml.core\n",
"\n",
- "print(\"This notebook was created using version 1.5.0 of the Azure ML SDK\")\n",
+ "print(\"This notebook was created using version 1.6.0 of the Azure ML SDK\")\n",
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
]
},
diff --git a/tutorials/create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb b/tutorials/create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb
index 0c8d63345..01da453cc 100644
--- a/tutorials/create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb
+++ b/tutorials/create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb
@@ -386,4 +386,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/tutorials/machine-learning-pipelines-advanced/scripts/batch_scoring.py b/tutorials/machine-learning-pipelines-advanced/scripts/batch_scoring.py
index 3b5e3dbcf..f5f315e63 100644
--- a/tutorials/machine-learning-pipelines-advanced/scripts/batch_scoring.py
+++ b/tutorials/machine-learning-pipelines-advanced/scripts/batch_scoring.py
@@ -21,9 +21,10 @@
num_channel = 3
-def get_class_label_dict():
+def get_class_label_dict(labels_dir):
label = []
- proto_as_ascii_lines = tf.gfile.GFile("labels.txt").readlines()
+ labels_path = os.path.join(labels_dir, 'labels.txt')
+ proto_as_ascii_lines = tf.gfile.GFile(labels_path).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
@@ -34,14 +35,10 @@ def init():
parser = argparse.ArgumentParser(description="Start a tensorflow model serving")
parser.add_argument('--model_name', dest="model_name", required=True)
- parser.add_argument('--labels_name', dest="labels_name", required=True)
+ parser.add_argument('--labels_dir', dest="labels_dir", required=True)
args, _ = parser.parse_known_args()
- workspace = Run.get_context(allow_offline=False).experiment.workspace
- label_ds = Dataset.get_by_name(workspace=workspace, name=args.labels_name)
- label_ds.download(target_path='.', overwrite=True)
-
- label_dict = get_class_label_dict()
+ label_dict = get_class_label_dict(args.labels_dir)
classes_num = len(label_dict)
with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
diff --git a/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb b/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb
index f5b06face..252e632d6 100644
--- a/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb
+++ b/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb
@@ -20,14 +20,8 @@
"metadata": {},
"source": [
"# Use Azure Machine Learning Pipelines for batch prediction\n",
- "\n",
- "## Note\n",
- "This notebook uses public preview functionality (ParallelRunStep). Please install azureml-contrib-pipeline-steps package before running this notebook.\n",
- "\n",
- "\n",
"In this tutorial, you use Azure Machine Learning service pipelines to run a batch scoring image classification job. The example job uses the pre-trained [Inception-V3](https://arxiv.org/abs/1512.00567) CNN (convolutional neural network) Tensorflow model to classify unlabeled images. Machine learning pipelines optimize your workflow with speed, portability, and reuse so you can focus on your expertise, machine learning, rather than on infrastructure and automation. After building and publishing a pipeline, you can configure a REST endpoint to enable triggering the pipeline from any HTTP library on any platform.\n",
"\n",
- "\n",
"In this tutorial, you learn the following tasks:\n",
"\n",
"> * Configure workspace and download sample data\n",
@@ -38,7 +32,7 @@
"> * Build, run, and publish a pipeline\n",
"> * Enable a REST endpoint for the pipeline\n",
"\n",
- "If you don\u00e2\u20ac\u2122t have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning service](https://aka.ms/AMLFree) today."
+ "If you don't have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning service](https://aka.ms/AMLFree) today."
]
},
{
@@ -129,7 +123,7 @@
"from azureml.pipeline.core import PipelineData\n",
"\n",
"input_images = Dataset.File.from_files((batchscore_blob, \"batchscoring/images/\"))\n",
- "label_ds = Dataset.File.from_files((batchscore_blob, \"batchscoring/labels/*.txt\"))\n",
+ "label_ds = Dataset.File.from_files((batchscore_blob, \"batchscoring/labels/\"))\n",
"output_dir = PipelineData(name=\"scores\", \n",
" datastore=def_data_store, \n",
" output_path_on_compute=\"batchscoring/results\")"
@@ -149,7 +143,7 @@
"outputs": [],
"source": [
"input_images = input_images.register(workspace = ws, name = \"input_images\")\n",
- "label_ds = label_ds.register(workspace = ws, name = \"label_ds\")"
+ "label_ds = label_ds.register(workspace = ws, name = \"label_ds\", create_new_version=True)"
]
},
{
@@ -260,7 +254,7 @@
"The script `batch_scoring.py` takes the following parameters, which get passed from the `ParallelRunStep` that you create later:\n",
"\n",
"- `--model_name`: the name of the model being used\n",
- "- `--labels_name` : the name of the `Dataset` holding the `labels.txt` file \n",
+ "- `--labels_dir` : the directory containing the `labels.txt` file \n",
"\n",
"The pipelines infrastructure uses the `ArgumentParser` class to pass parameters into pipeline steps. For example, in the code below the first argument `--model_name` is given the property identifier `model_name`. In the `main()` function, this property is accessed using `Model.get_model_path(args.model_name)`."
]
@@ -296,7 +290,8 @@
"from azureml.core.conda_dependencies import CondaDependencies\n",
"from azureml.core.runconfig import DEFAULT_GPU_IMAGE\n",
"\n",
- "cd = CondaDependencies.create(pip_packages=[\"tensorflow-gpu==1.15.2\", \"azureml-defaults\"])\n",
+ "cd = CondaDependencies.create(pip_packages=[\"tensorflow-gpu==1.15.2\",\n",
+ " \"azureml-core\", \"azureml-dataprep[fuse]\"])\n",
"\n",
"env = Environment(name=\"parallelenv\")\n",
"env.python.conda_dependencies=cd\n",
@@ -317,7 +312,7 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.contrib.pipeline.steps import ParallelRunConfig\n",
+ "from azureml.pipeline.steps import ParallelRunConfig\n",
"\n",
"parallel_run_config = ParallelRunConfig(\n",
" environment=env,\n",
@@ -356,18 +351,20 @@
"metadata": {},
"outputs": [],
"source": [
- "from azureml.contrib.pipeline.steps import ParallelRunStep\n",
+ "from azureml.pipeline.steps import ParallelRunStep\n",
"from datetime import datetime\n",
"\n",
"parallel_step_name = \"batchscoring-\" + datetime.now().strftime(\"%Y%m%d%H%M\")\n",
"\n",
+ "label_config = label_ds.as_named_input(\"labels_input\")\n",
+ "\n",
"batch_score_step = ParallelRunStep(\n",
" name=parallel_step_name,\n",
" inputs=[input_images.as_named_input(\"input_images\")],\n",
" output=output_dir,\n",
- " models=[model],\n",
" arguments=[\"--model_name\", \"inception\",\n",
- " \"--labels_name\", \"label_ds\"],\n",
+ " \"--labels_dir\", label_config],\n",
+ " side_inputs=[label_config],\n",
" parallel_run_config=parallel_run_config,\n",
" allow_reuse=False\n",
")"
diff --git a/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.yml b/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.yml
index 1e896b846..bb6402691 100644
--- a/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.yml
+++ b/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.yml
@@ -3,7 +3,7 @@ dependencies:
- pip:
- azureml-sdk
- azureml-pipeline-core
- - azureml-contrib-pipeline-steps
+ - azureml-pipeline-steps
- pandas
- requests
- azureml-widgets