From bb11c80b1b39a78ec2bde5fc14bef92bc6b612ff Mon Sep 17 00:00:00 2001 From: amlrelsa-ms Date: Wed, 23 Aug 2023 03:24:03 +0000 Subject: [PATCH 1/4] update samples from Release-193 as a part of 1.53.0 SDK stable release --- README.md | 4 +- configuration.ipynb | 4 +- .../RAPIDS/azure-ml-with-nvidia-rapids.ipynb | 2 +- .../fairness/fairlearn-azureml-mitigation.yml | 4 +- .../fairness/upload-fairness-dashboard.yml | 4 +- .../automated-machine-learning/automl_env.yml | 11 +- .../automl_env_linux.yml | 12 +- .../automl_env_mac.yml | 18 +- ...fication-bank-marketing-all-features.ipynb | 20 +- ...-ml-classification-credit-card-fraud.ipynb | 16 + .../auto-ml-classification-text-dnn.ipynb | 22 +- .../classification-text-dnn/helper.py | 12 +- .../classification-text-dnn/infer.py | 2 +- .../auto-ml-continuous-retraining.ipynb | 16 + .../continuous-retraining/check_data.py | 2 +- .../codegen-for-autofeaturization.ipynb | 2 +- ...-training-from-autofeaturization-run.ipynb | 2 +- .../experimental/automl_thin_client_env.yml | 13 - ...tion-credit-card-fraud-local-managed.ipynb | 2 +- .../auto-ml-regression-model-proxy.ipynb | 2 +- ...-ml-forecasting-backtest-many-models.ipynb | 2 +- ...ml-forecasting-backtest-single-model.ipynb | 2 +- .../auto-ml-forecasting-bike-share.ipynb | 40 +- .../forecasting_script.py | 2 +- .../auto-ml-forecasting-energy-demand.ipynb | 398 +++++++------- .../forecasting_script.py | 2 +- .../auto-ml-forecasting-github-dau.ipynb | 13 +- .../forecasting-github-dau/infer.py | 2 +- ...-forecasting-hierarchical-timeseries.ipynb | 9 +- .../auto-ml-forecasting-many-models.ipynb | 13 +- ...to-ml-forecasting-orange-juice-sales.ipynb | 42 +- .../forecasting_script.py | 2 +- .../auto-ml-forecasting-pipelines.ipynb | 55 +- .../forecasting-pipelines/scripts/infer.py | 4 +- ...nivariate-recipe-experiment-settings.ipynb | 187 ++++--- ...ing-univariate-recipe-run-experiment.ipynb | 269 ++++----- .../forecasting_script.py | 2 +- ...regression-explanation-featurization.ipynb | 54 +- ...e-app-insights-in-production-service.ipynb | 2 +- .../onnx-train-pytorch-aml-deploy-mnist.ipynb | 4 +- .../production-deploy-to-aks-gpu.ipynb | 14 +- .../production-deploy-to-aks-gpu.yml | 1 - .../production-deploy-to-aks-ssl.ipynb | 2 +- .../production-deploy-to-aks.ipynb | 2 +- .../deployment/spark/iris.model/data/_SUCCESS | 0 ...4b28-bbca-6c17889ddcbf-c000.snappy.parquet | Bin 4224 -> 0 bytes .../spark/iris.model/metadata/_SUCCESS | 0 .../spark/iris.model/metadata/part-00000 | 1 - .../model-register-and-deploy-spark.ipynb | 349 ------------ .../spark/model-register-and-deploy-spark.yml | 4 - how-to-use-azureml/deployment/spark/score.py | 37 -- .../gpu-explanation/gpu_tree_explainer.py | 59 -- ...ain-explain-model-gpu-tree-explainer.ipynb | 517 ------------------ ...train-explain-model-gpu-tree-explainer.yml | 18 - .../explain-model-on-amlcompute.yml | 3 +- ...save-retrieve-explanations-run-history.yml | 2 +- ...train-explain-model-locally-and-deploy.yml | 2 +- ...explain-model-on-amlcompute-and-deploy.yml | 3 +- ...nes-parameter-tuning-with-hyperdrive.ipynb | 2 +- ...asing-datapath-and-pipelineparameter.ipynb | 2 +- ...casing-dataset-and-pipelineparameter.ipynb | 2 +- ...with-automated-machine-learning-step.ipynb | 2 +- .../aml-pipelines-with-commandstep.ipynb | 4 +- ...-pipelines-with-notebook-runner-step.ipynb | 2 +- .../file-dataset-image-inference-mnist.ipynb | 2 +- .../file-dataset-partition-per-folder.ipynb | 2 +- .../tabular-dataset-inference-iris.ipynb | 2 +- ...tabular-dataset-partition-per-column.ipynb | 
5 +-
 ...pipeline-style-transfer-parallel-run.ipynb | 2 +-
 .../fastai-with-custom-docker.ipynb | 2 +-
 ...yperparameter-tune-deploy-with-keras.ipynb | 64 ++-
 ...-hyperparameter-tune-deploy-with-keras.yml | 2 -
 ...pytorch-with-distributeddataparallel.ipynb | 8 +-
 .../distributed-pytorch-with-horovod.ipynb | 4 +-
 ...erparameter-tune-deploy-with-pytorch.ipynb | 65 ++-
 ...erparameter-tune-deploy-with-sklearn.ipynb | 56 +-
 .../distributed-tensorflow-with-horovod.ipynb | 20 +-
 ...-tune-and-warm-start-with-tensorflow.ipynb | 44 +-
 ...arameter-tune-deploy-with-tensorflow.ipynb | 56 +-
 ...rparameter-tune-deploy-with-tensorflow.yml | 2 -
 .../tf_mnist_with_checkpoint.py | 4 +-
 .../train-tensorflow-resume-training.ipynb | 27 +-
 .../train-and-deploy-pytorch.ipynb | 2 +-
 .../reinforcement-learning/README.md | 2 +-
 .../files/networkutils.py | 237 --------
 .../pong_rllib.ipynb | 2 +-
 .../rai-loan-decision.yml | 4 +-
 .../logging-api/logging-api.ipynb | 2 +-
 .../export-run-history-to-tensorboard.yml | 10 -
 .../tensorboard/tensorboard/tensorboard.yml | 1 -
 .../pipeline-for-image-classification.ipynb | 2 +-
 .../train-with-datasets.ipynb | 2 +-
 index.md | 232 ++++----
 setup-environment/configuration.ipynb | 2 +-
 .../img-classification-part1-training.ipynb | 2 +-
 ...ipeline-batch-scoring-classification.ipynb | 2 +-
 96 files changed, 1116 insertions(+), 2055 deletions(-)
 delete mode 100644 how-to-use-azureml/deployment/spark/iris.model/data/_SUCCESS
 delete mode 100644 how-to-use-azureml/deployment/spark/iris.model/data/part-00000-dabcf097-2b45-4b28-bbca-6c17889ddcbf-c000.snappy.parquet
 delete mode 100644 how-to-use-azureml/deployment/spark/iris.model/metadata/_SUCCESS
 delete mode 100644 how-to-use-azureml/deployment/spark/iris.model/metadata/part-00000
 delete mode 100644 how-to-use-azureml/deployment/spark/model-register-and-deploy-spark.ipynb
 delete mode 100644 how-to-use-azureml/deployment/spark/model-register-and-deploy-spark.yml
 delete mode 100644 how-to-use-azureml/deployment/spark/score.py
 delete mode 100644 how-to-use-azureml/explain-model/azure-integration/gpu-explanation/gpu_tree_explainer.py
 delete mode 100644 how-to-use-azureml/explain-model/azure-integration/gpu-explanation/train-explain-model-gpu-tree-explainer.ipynb
 delete mode 100644 how-to-use-azureml/explain-model/azure-integration/gpu-explanation/train-explain-model-gpu-tree-explainer.yml
 delete mode 100644 how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/files/networkutils.py
 delete mode 100644 how-to-use-azureml/track-and-monitor-experiments/tensorboard/export-run-history-to-tensorboard/export-run-history-to-tensorboard.yml

diff --git a/README.md b/README.md
index ecddfe6a1..d9ebd8206 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,6 @@
 # Azure Machine Learning Python SDK notebooks
-
-** **With the introduction of AzureML SDK v2, this samples repository for the v1 SDK is now deprecated and will not be monitored or updated. Users are encouraged to visit the [v2 SDK samples repository](https://github.com/Azure/azureml-examples) instead for up-to-date and enhanced examples of how to build, train, and deploy machine learning models with AzureML's newest features.** **
-
+> A community-driven repository of examples that use MLflow for tracking can be found at https://github.com/Azure/azureml-examples
 
 Welcome to the Azure Machine Learning Python SDK notebooks repository!
diff --git a/configuration.ipynb b/configuration.ipynb index dac58f080..86b7388e1 100644 --- a/configuration.ipynb +++ b/configuration.ipynb @@ -103,7 +103,7 @@ "source": [ "import azureml.core\n", "\n", - "print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.53.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, @@ -329,7 +329,7 @@ " print(\"Creating new gpu-cluster\")\n", " \n", " # Specify the configuration for the new cluster\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_NC6\",\n", + " compute_config = AmlCompute.provisioning_configuration(vm_size=\"Standard_NC6s_v3\",\n", " min_nodes=0,\n", " max_nodes=4)\n", " # Create the cluster with the specified name and configuration\n", diff --git a/contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb b/contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb index a6ade1184..7c33e4bff 100644 --- a/contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb +++ b/contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb @@ -174,7 +174,7 @@ "else:\n", " print(\"creating new cluster\")\n", " # vm_size parameter below could be modified to one of the RAPIDS-supported VM types\n", - " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"Standard_NC6s_v2\", min_nodes=1, max_nodes = 1)\n", + " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"Standard_NC6s_v3\", min_nodes=1, max_nodes = 1)\n", "\n", " # create the cluster\n", " gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, provisioning_config)\n", diff --git a/contrib/fairness/fairlearn-azureml-mitigation.yml b/contrib/fairness/fairlearn-azureml-mitigation.yml index 94449e1a2..75ba1a9f7 100644 --- a/contrib/fairness/fairlearn-azureml-mitigation.yml +++ b/contrib/fairness/fairlearn-azureml-mitigation.yml @@ -3,10 +3,10 @@ dependencies: - pip: - azureml-sdk - azureml-contrib-fairness - - fairlearn>=0.6.2 + - fairlearn>=0.6.2,<=0.7.0 - joblib - liac-arff - - raiwidgets~=0.26.0 + - raiwidgets~=0.28.0 - itsdangerous==2.0.1 - markupsafe<2.1.0 - protobuf==3.20.0 diff --git a/contrib/fairness/upload-fairness-dashboard.yml b/contrib/fairness/upload-fairness-dashboard.yml index 9cae9e8cc..d085603e0 100644 --- a/contrib/fairness/upload-fairness-dashboard.yml +++ b/contrib/fairness/upload-fairness-dashboard.yml @@ -3,10 +3,10 @@ dependencies: - pip: - azureml-sdk - azureml-contrib-fairness - - fairlearn>=0.6.2 + - fairlearn>=0.6.2,<=0.7.0 - joblib - liac-arff - - raiwidgets~=0.26.0 + - raiwidgets~=0.28.0 - itsdangerous==2.0.1 - markupsafe<2.1.0 - protobuf==3.20.0 diff --git a/how-to-use-azureml/automated-machine-learning/automl_env.yml b/how-to-use-azureml/automated-machine-learning/automl_env.yml index 5d8554edd..71dc83c24 100644 --- a/how-to-use-azureml/automated-machine-learning/automl_env.yml +++ b/how-to-use-azureml/automated-machine-learning/automl_env.yml @@ -7,7 +7,8 @@ dependencies: # The python interpreter version. # Azure ML only supports 3.7.0 and later. - pip==22.3.1 -- python>=3.7,<3.9 +- python>=3.8,<3.9 +- holidays==0.10.3 - conda-forge::fbprophet==0.7.1 - pandas==1.1.5 - scipy==1.5.3 @@ -16,10 +17,10 @@ dependencies: - pip: # Required packages for AzureML execution, history, and data preparation. 
- - azureml-widgets~=1.51.0 - - azureml-defaults~=1.51.0 - - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.51.0/validated_win32_requirements.txt [--no-deps] - - matplotlib==3.6.2 + - azureml-widgets~=1.53.0 + - azureml-defaults~=1.53.0 + - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.53.0/validated_win32_requirements.txt [--no-deps] + - matplotlib==3.7.1 - xgboost==1.3.3 - cmdstanpy==0.9.5 - setuptools-git==1.2 diff --git a/how-to-use-azureml/automated-machine-learning/automl_env_linux.yml b/how-to-use-azureml/automated-machine-learning/automl_env_linux.yml index 88326eaf5..cba23ec34 100644 --- a/how-to-use-azureml/automated-machine-learning/automl_env_linux.yml +++ b/how-to-use-azureml/automated-machine-learning/automl_env_linux.yml @@ -7,13 +7,13 @@ dependencies: # The python interpreter version. # Azure ML only supports 3.7 and later. - pip==22.3.1 -- python>=3.7,<3.9 -- matplotlib==3.2.1 +- python>=3.8,<3.9 +- matplotlib==3.7.1 - numpy>=1.21.6,<=1.22.3 - cython==0.29.14 - urllib3==1.26.7 - scipy>=1.4.1,<=1.5.3 -- scikit-learn==0.22.1 +- scikit-learn==1.1.0 - py-xgboost<=1.3.3 - holidays==0.10.3 - conda-forge::fbprophet==0.7.1 @@ -23,10 +23,10 @@ dependencies: - pip: # Required packages for AzureML execution, history, and data preparation. - - azureml-widgets~=1.51.0 - - azureml-defaults~=1.51.0 + - azureml-widgets~=1.53.0 + - azureml-defaults~=1.53.0 - pytorch-transformers==1.0.0 - spacy==2.2.4 - pystan==2.19.1.1 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz - - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.51.0/validated_linux_requirements.txt [--no-deps] + - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.53.0/validated_linux_requirements.txt [--no-deps] diff --git a/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml b/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml index f70f0ebe3..ff5c823b0 100644 --- a/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml +++ b/how-to-use-azureml/automated-machine-learning/automl_env_mac.yml @@ -7,26 +7,22 @@ dependencies: # The python interpreter version. # Currently Azure ML only supports 3.7 and later. - pip==22.3.1 -- python>=3.7,<3.9 -- matplotlib==3.2.1 +- python>=3.8,<3.9 - numpy>=1.21.6,<=1.22.3 - cython==0.29.14 -- urllib3==1.26.7 - scipy>=1.4.1,<=1.5.3 -- scikit-learn==0.22.1 -- py-xgboost<=1.3.3 +- scikit-learn==1.1.0 - holidays==0.10.3 -- pytorch::pytorch=1.11.0 -- cudatoolkit=9.0 -- notebook - pip: # Required packages for AzureML execution, history, and data preparation. 
- - azureml-widgets~=1.51.0 - - azureml-defaults~=1.51.0 + - azureml-widgets~=1.53.0 + - azureml-defaults~=1.53.0 - pytorch-transformers==1.0.0 - spacy==2.2.4 - pystan==2.19.1.1 - fbprophet==0.7.1 + - xgboost==1.3.3 + - matplotlib==3.7.1 - https://aka.ms/automl-resources/packages/en_core_web_sm-2.1.0.tar.gz - - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.51.0/validated_darwin_requirements.txt [--no-deps] + - -r https://automlsdkdataresources.blob.core.windows.net/validated-requirements/1.53.0/validated_darwin_requirements.txt [--no-deps] diff --git a/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb b/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb index 33ea3b14a..368500457 100644 --- a/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb +++ b/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb @@ -1,5 +1,21 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) Microsoft Corporation. All rights reserved.\n", + "\n", + "Licensed under the MIT License." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.png)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -830,9 +846,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "scrolled": true - }, + "metadata": {}, "outputs": [], "source": [ "%matplotlib notebook\n", diff --git a/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb b/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb index a310c7be6..e7a8d8bad 100644 --- a/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb +++ b/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb @@ -1,5 +1,21 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) Microsoft Corporation. All rights reserved.\n", + "\n", + "Licensed under the MIT License." 
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.png)"
+ ]
+ },
 {
 "cell_type": "markdown",
 "metadata": {},
diff --git a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb
index d941e0953..9a13ce7eb 100644
--- a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb
+++ b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb
@@ -1,5 +1,21 @@
 {
 "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Copyright (c) Microsoft Corporation. All rights reserved.\n",
+ "\n",
+ "Licensed under the MIT License."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.png)"
+ ]
+ },
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -139,8 +155,8 @@
 " print(\"Found existing cluster, use it.\")\n",
 "except ComputeTargetException:\n",
 " compute_config = AmlCompute.provisioning_configuration(\n",
- " vm_size=\"STANDARD_NC6\", # CPU for BiLSTM, such as \"STANDARD_D2_V2\"\n",
- " # To use BERT (this is recommended for best performance), select a GPU such as \"STANDARD_NC6\"\n",
+ " vm_size=\"Standard_NC6s_v3\", # For BiLSTM, a CPU VM such as \"STANDARD_D2_V2\" is sufficient\n",
+ " # To use BERT (this is recommended for best performance), select a GPU such as \"Standard_NC6s_v3\"\n",
 " # or similar GPU option\n",
 " # available in your workspace\n",
 " idle_seconds_before_scaledown=60,\n",
@@ -336,7 +352,7 @@
 "metadata": {},
 "source": [
 "For local inferencing, you can load the model locally via the method `remote_run.get_output()`. For more information on the arguments expected by this method, you can run `remote_run.get_output??`.\n",
- "Note that when the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your azureml-examples folder here: \"azureml-examples/python-sdk/tutorials/automl-with-azureml\""
+ "Note that when the model contains BERT, this step will require pytorch and pytorch-transformers installed in your local environment. 
The exact versions of these packages can be found in the **automl_env.yml** file located in the local copy of your MachineLearningNotebooks folder here: \"MachineLearningNotebooks\\how-to-use-azureml\\automated-machine-learning\"" ] }, { diff --git a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/helper.py b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/helper.py index 499d75e34..881163362 100644 --- a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/helper.py +++ b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/helper.py @@ -1,3 +1,4 @@ +import json import pandas as pd from azureml.core import Environment, ScriptRunConfig from azureml.core.run import Run @@ -13,7 +14,16 @@ def run_inference( model_name, ): - inference_env = train_run.get_environment() + try: + inference_env = train_run.get_environment() + except BaseException: + run_details = train_run.get_details() + run_def = run_details.get("runDefinition") + env = run_def.get("environment") + if env is None: + raise + json.dump(env, open("azureml_environment.json", "w")) + inference_env = Environment.load_from_directory(".") est = ScriptRunConfig( source_directory=script_folder, diff --git a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/infer.py b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/infer.py index 4d926d0b2..98525f88d 100644 --- a/how-to-use-azureml/automated-machine-learning/classification-text-dnn/infer.py +++ b/how-to-use-azureml/automated-machine-learning/classification-text-dnn/infer.py @@ -3,7 +3,7 @@ import pandas as pd import numpy as np -from sklearn.externals import joblib +import joblib from azureml.automl.runtime.shared.score import scoring, constants from azureml.core import Run, Dataset diff --git a/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb b/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb index 90f519935..f1ce02605 100644 --- a/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb +++ b/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb @@ -1,5 +1,21 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) Microsoft Corporation. All rights reserved.\n", + "\n", + "Licensed under the MIT License." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.png)" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/how-to-use-azureml/automated-machine-learning/continuous-retraining/check_data.py b/how-to-use-azureml/automated-machine-learning/continuous-retraining/check_data.py index aec68d422..628ea9611 100644 --- a/how-to-use-azureml/automated-machine-learning/continuous-retraining/check_data.py +++ b/how-to-use-azureml/automated-machine-learning/continuous-retraining/check_data.py @@ -31,7 +31,7 @@ model = Model(ws, args.model_name) last_train_time = model.created_time print("Model was last trained on {0}.".format(last_train_time)) -except Exception as e: +except Exception: print("Could not get last model train time.") last_train_time = datetime.min.replace(tzinfo=pytz.UTC) diff --git a/how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-codegen/codegen-for-autofeaturization.ipynb b/how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-codegen/codegen-for-autofeaturization.ipynb index d90d8dbde..254f05ffa 100644 --- a/how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-codegen/codegen-for-autofeaturization.ipynb +++ b/how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-codegen/codegen-for-autofeaturization.ipynb @@ -97,7 +97,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.53.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-custom-model-training/custom-model-training-from-autofeaturization-run.ipynb b/how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-custom-model-training/custom-model-training-from-autofeaturization-run.ipynb index abd76190f..ab73d8f21 100644 --- a/how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-custom-model-training/custom-model-training-from-autofeaturization-run.ipynb +++ b/how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-custom-model-training/custom-model-training-from-autofeaturization-run.ipynb @@ -97,7 +97,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.53.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/experimental/automl_thin_client_env.yml b/how-to-use-azureml/automated-machine-learning/experimental/automl_thin_client_env.yml index 1cc5549a4..76acd2b57 100644 --- a/how-to-use-azureml/automated-machine-learning/experimental/automl_thin_client_env.yml +++ b/how-to-use-azureml/automated-machine-learning/experimental/automl_thin_client_env.yml @@ -1,21 +1,9 @@ name: azure_automl_experimental dependencies: # The python interpreter version. -<<<<<<< HEAD - # Currently Azure ML only supports 3.6.0 and later. 
-- pip<=20.2.4 -- python>=3.6.0,<3.10 -- cython==0.29.14 -- urllib3==1.26.7 -- PyJWT < 2.0.0 -- numpy==1.22.3 -- pywin32==227 -- cryptography<37.0.0 -======= # Currently Azure ML only supports 3.7.0 and later. - pip<=22.3.1 - python>=3.7.0,<3.11 ->>>>>>> 4671acd451ce979c3cebcd3917804861a333b710 - pip: # Required packages for AzureML execution, history, and data preparation. @@ -25,4 +13,3 @@ dependencies: - azureml-mlflow - pandas - mlflow - - docker<6.0.0 diff --git a/how-to-use-azureml/automated-machine-learning/experimental/classification-credit-card-fraud-local-managed/auto-ml-classification-credit-card-fraud-local-managed.ipynb b/how-to-use-azureml/automated-machine-learning/experimental/classification-credit-card-fraud-local-managed/auto-ml-classification-credit-card-fraud-local-managed.ipynb index 58afc4389..6974277ed 100644 --- a/how-to-use-azureml/automated-machine-learning/experimental/classification-credit-card-fraud-local-managed/auto-ml-classification-credit-card-fraud-local-managed.ipynb +++ b/how-to-use-azureml/automated-machine-learning/experimental/classification-credit-card-fraud-local-managed/auto-ml-classification-credit-card-fraud-local-managed.ipynb @@ -92,7 +92,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.53.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb b/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb index 130f19011..85ec52fc4 100644 --- a/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb +++ b/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb @@ -91,7 +91,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.53.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb index 9ec627dec..4faf226bb 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.png)" ] }, { diff --git 
a/how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb index bc1a6a16e..f6c2c4c05 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb @@ -7,7 +7,7 @@ "Copyright (c) Microsoft Corporation. All rights reserved.\n", "\n", "Licensed under the MIT License.\n", - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl-forecasting-function.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/auto-ml-forecasting-backtest-single-model.png)" ] }, { diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb index 02476bd83..d63d01e64 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb @@ -1,7 +1,6 @@ { "cells": [ { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -11,7 +10,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -19,7 +17,13 @@ ] }, { - "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "!Important!
This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-bike-share)).
" + ] + }, + { "cell_type": "markdown", "metadata": {}, "source": [ @@ -37,7 +41,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -56,7 +59,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -86,7 +88,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -103,7 +104,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -137,7 +137,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -177,7 +176,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -201,7 +199,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "nteract": { @@ -237,7 +234,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": { "nteract": { @@ -277,7 +273,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -316,7 +311,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -334,7 +328,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -359,7 +352,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -378,7 +370,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -398,7 +389,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -441,7 +431,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -467,7 +456,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -486,7 +474,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -512,7 +499,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -556,7 +542,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -564,7 +549,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -583,7 +567,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -606,7 +589,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -637,7 +619,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -656,7 +637,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -673,7 +653,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -705,7 +684,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -715,7 +693,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -747,7 +724,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -822,7 +798,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.9" + "version": "3.8.10" }, "microsoft": { "ms_spell_check": { diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/forecasting_script.py b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/forecasting_script.py index 6988dd268..6f0b1194a 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/forecasting_script.py +++ b/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/forecasting_script.py @@ -1,6 +1,6 @@ import argparse from azureml.core import 
Dataset, Run -from sklearn.externals import joblib +import joblib parser = argparse.ArgumentParser() parser.add_argument( diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb index f2666bb95..e9f498af8 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb @@ -2,22 +2,30 @@ "cells": [ { "cell_type": "markdown", + "metadata": {}, "source": [ "Copyright (c) Microsoft Corporation. All rights reserved.\n", "\n", "Licensed under the MIT License." - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.png)" - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, + "source": [ + "!Important!
This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/blob/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-task-energy-demand/automl-forecasting-task-energy-demand-advanced-mlflow.ipynb)).
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, "source": [ "# Automated Machine Learning\n", "_**Forecasting using the Energy Demand Dataset**_\n", @@ -32,11 +40,11 @@ "Advanced Forecasting\n", "1. [Advanced Training](#advanced_training)\n", "1. [Advanced Results](#advanced_results)" - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "# Introduction\n", "\n", @@ -52,18 +60,20 @@ "1. Generate the forecast and compute the out-of-sample accuracy metrics\n", "1. Configuration and remote run of AutoML for a time-series model with lag and rolling window features\n", "1. Run and explore the forecast with lagging features" - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "# Setup" - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import json\n", "import logging\n", @@ -82,36 +92,36 @@ "from azureml.core import Experiment, Workspace, Dataset\n", "from azureml.train.automl import AutoMLConfig\n", "from datetime import datetime" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "This notebook is compatible with Azure ML SDK version 1.35.0 or later." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "ws = Workspace.from_config()\n", "\n", @@ -133,13 +143,11 @@ "pd.set_option(\"display.max_colwidth\", None)\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "outputDf.T" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "## Create or Attach existing AmlCompute\n", "A compute target is required to execute a remote Automated ML run. \n", @@ -149,11 +157,13 @@ "#### Creation of AmlCompute takes approximately 5 minutes. \n", "If the AmlCompute with that name is already in your workspace this code will skip the creation process.\n", "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota." 
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "from azureml.core.compute import ComputeTarget, AmlCompute\n",
 "from azureml.core.compute_target import ComputeTargetException\n",
 "\n",
 "# Choose a name for your cluster.\n",
 "amlcompute_cluster_name = \"energy-cluster\"\n",
 "\n",
 "# Verify that cluster does not exist already\n",
 "try:\n",
 " compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n",
 " print(\"Found existing cluster, use it.\")\n",
 "except ComputeTargetException:\n",
 " compute_config = AmlCompute.provisioning_configuration(\n",
 " vm_size=\"STANDARD_DS12_V2\", max_nodes=6\n",
 " )\n",
 " compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n",
 "\n",
 "compute_target.wait_for_completion(show_output=True)"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "# Data\n",
 "\n",
 "We will use energy consumption [data from New York City](http://mis.nyiso.com/public/P-58Blist.htm) for model training. The data is stored in a tabular format and includes energy demand and basic weather data at an hourly frequency. \n",
 "\n",
 "With Azure Machine Learning datasets you can keep a single copy of data in your storage, easily access data during model training, share data and collaborate with other users. Below, we will upload the dataset and create a [tabular dataset](https://docs.microsoft.com/bs-latn-ba/azure/machine-learning/service/how-to-create-register-datasets#dataset-types) to be used for training and prediction."
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "Let's set up what we know about the dataset.\n",
 "\n",
 "Target column is what we want to forecast.\n",
 "Time column is the time axis along which to predict.\n",
 "\n",
 "The other columns, \"temp\" and \"precip\", are implicitly designated as features."
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "target_column_name = \"demand\"\n",
 "time_column_name = \"timeStamp\""
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "dataset = Dataset.Tabular.from_delimited_files(\n",
 " path=\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv\"\n",
 ").with_timestamp_columns(fine_grain_timestamp=time_column_name)\n",
 "dataset.take(5).to_pandas_dataframe().reset_index(drop=True)"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "The NYC Energy dataset is missing energy demand values for all datetimes later than August 10th, 2017 5AM. Below, we trim the rows containing these missing values from the end of the dataset."
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "# Cut off the end of the dataset due to large number of nan values\n",
 "dataset = dataset.time_before(datetime(2017, 10, 10, 5))"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "## Split the data into train and test sets"
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "The first split we make is into train and test sets. Note that we are splitting on time. Data before and including August 8th, 2017 5AM will be used for training, and data after will be used for testing."
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "# split into train based on time\n",
 "train = (\n",
 " dataset.time_before(datetime(2017, 8, 8, 5), include_boundary=True)\n",
 " .to_pandas_dataframe()\n",
 " .reset_index(drop=True)\n",
 ")\n",
 "train.sort_values(time_column_name).tail(5)"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "# split into test based on time\n",
 "test = (\n",
 " dataset.time_between(datetime(2017, 8, 8, 6), datetime(2017, 8, 10, 5))\n",
 " .to_pandas_dataframe()\n",
 " .reset_index(drop=True)\n",
 ")\n",
 "test.head(5)"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "jupyter": {
+ "outputs_hidden": false
+ },
+ "nteract": {
+ "transient": {
+ "deleting": false
+ }
+ }
+ },
+ "outputs": [],
 "source": [
 "# register the split train and test data in workspace storage\n",
 "from azureml.data.dataset_factory import TabularDatasetFactory\n",
 "\n",
 "datastore = ws.get_default_datastore()\n",
 "train_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
 " train, target=(datastore, \"dataset/\"), name=\"nyc_energy_train\"\n",
 ")\n",
 "test_dataset = TabularDatasetFactory.register_pandas_dataframe(\n",
 " test, target=(datastore, \"dataset/\"), name=\"nyc_energy_test\"\n",
 ")"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {
- "jupyter": {
- "source_hidden": false,
- "outputs_hidden": false
- },
- "nteract": {
- "transient": {
- "deleting": false
- }
- }
- }
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "### Setting the maximum forecast horizon\n",
 "\n",
 "The forecast horizon is the number of periods into the future that the model should predict. It is generally recommended that users set forecast horizons to less than 100 time periods (i.e. less than 100 hours in the NYC energy example). Furthermore, AutoML's memory use and computation time increase in proportion to the length of the horizon, so consider carefully how this value is set. If a long horizon forecast really is necessary, consider aggregating the series to a coarser time scale. \n",
 "\n",
 "Learn more about forecast horizons in our [Auto-train a time-series forecast model](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-auto-train-forecast#configure-and-run-experiment) guide.\n",
 "\n",
 "In this example, we set the horizon to 48 hours."
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "forecast_horizon = 48"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "## Forecasting Parameters\n",
 "To define forecasting parameters for your experiment training, you can leverage the ForecastingParameters class. The table below details the forecasting parameters we will be passing into our experiment.\n",
 "\n",
 "|Property|Description|\n",
 "|-|-|\n",
 "|**time_column_name**|The name of your time column.|\n",
 "|**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|\n",
 "|**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.\n",
 "|**cv_step_size**|Number of periods between two consecutive cross-validation folds. The default value is \"auto\", in which case AutoML determines the cross-validation step size automatically, if a validation set is not provided. Or users could specify an integer value."
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "# Train\n",
 "\n",
 "Instantiate an AutoMLConfig object. This defines the settings and data used to run the experiment.\n",
 "\n",
 "|Property|Description|\n",
 "|-|-|\n",
 "|**task**|forecasting|\n",
 "|**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>|\n",
 "|**blocked_models**|Models in blocked_models won't be used by AutoML. All supported models can be found at [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.constants.supportedmodels.forecasting?view=azure-ml-py).|\n",
 "|**experiment_timeout_hours**|Maximum amount of time in hours that all iterations combined can take before the experiment terminates.|\n",
 "|**training_data**|The training data to be used within the experiment.|\n",
 "|**label_column_name**|The name of the label column.|\n",
 "|**compute_target**|The remote compute for training.|\n",
 "|**n_cross_validations**|Number of cross-validation folds to use for model/pipeline selection. 
The default value is \"auto\", in which case AutoMl determines the number of cross-validations automatically, if a validation set is not provided. Or users could specify an integer value.\n", "|**enable_early_stopping**|Flag to enble early termination if the score is not improving in the short term.|\n", "|**forecasting_parameters**|A class holds all the forecasting related parameters.|\n" - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "This notebook uses the blocked_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blocked_models list but you may need to increase the experiment_timeout_hours parameter value to get results." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from azureml.automl.core.forecasting_parameters import ForecastingParameters\n", "\n", @@ -402,65 +411,65 @@ " verbosity=logging.INFO,\n", " forecasting_parameters=forecasting_parameters,\n", ")" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while.\n", "One may specify `show_output = True` to print currently running iterations to the console." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "remote_run = experiment.submit(automl_config, show_output=False)" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "remote_run.wait_for_completion()" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "## Retrieve the Best Run details\n", "Below we retrieve the best Run object from among all the runs in the experiment." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "best_run = remote_run.get_best_child()\n", "best_run" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "## Featurization\n", "We can look at the engineered feature names generated in time-series featurization via. the JSON file named 'engineered_feature_names.json' under the run outputs." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# Download the JSON file locally\n", "best_run.download_file(\n", @@ -470,13 +479,11 @@ " records = json.load(f)\n", "\n", "records" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### View featurization summary\n", "You can also see what featurization steps were performed on different raw features in the user data. 
For each raw feature in the user data, the following information is displayed:\n",
 "\n",
 "+ Raw feature name\n",
 "+ Number of engineered features formed out of this raw feature\n",
 "+ Type detected\n",
 "+ If feature was dropped\n",
 "+ List of feature transformations for the raw feature"
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "# Download the featurization summary JSON file locally\n",
 "best_run.download_file(\n",
 " \"outputs/featurization_summary.json\", \"featurization_summary.json\"\n",
 ")\n",
 "\n",
 "# Render the JSON as a pandas DataFrame\n",
 "with open(\"featurization_summary.json\", \"r\") as f:\n",
 " records = json.load(f)\n",
 "fs = pd.DataFrame.from_records(records)\n",
 "\n",
 "# View a summary of the featurization\n",
 "fs[\n",
 " [\n",
 " \"RawFeatureName\",\n",
 " \"TypeDetected\",\n",
 " \"Dropped\",\n",
 " \"EngineeredFeatureCount\",\n",
 " \"Transformations\",\n",
 " ]\n",
 "]"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "# Forecasting\n",
 "\n",
 "Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. We will do batch scoring on the test dataset which should have the same schema as training dataset.\n",
 "\n",
 "The inference will run on a remote compute. In this example, it will re-use the training compute."
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "test_experiment = Experiment(ws, experiment_name + \"_inference\")"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "### Retrieving forecasts from the model\n",
 "We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and executed on the remote compute."
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "from run_forecast import run_remote_inference\n",
 "\n",
 "remote_run_infer = run_remote_inference(\n",
 " test_experiment=test_experiment,\n",
 " compute_target=compute_target,\n",
 " train_run=best_run,\n",
 " test_dataset=test_dataset,\n",
 " target_column_name=target_column_name,\n",
 ")\n",
 "remote_run_infer.wait_for_completion(show_output=False)\n",
 "\n",
 "# download the inference output file to the local machine\n",
 "remote_run_infer.download_file(\"outputs/predictions.csv\", \"predictions.csv\")"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "### Evaluate\n",
 "To evaluate the accuracy of the forecast, we'll compare against the actual demand values for some select metrics, including the mean absolute percentage error (MAPE). For more metrics that can be used for evaluation after training, please see [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics), and [how to calculate residuals](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals)."
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "# load forecast data frame\n",
 "fcst_df = pd.read_csv(\"predictions.csv\", parse_dates=[time_column_name])\n",
 "fcst_df.head()"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "from azureml.automl.core.shared import constants\n",
 "from azureml.automl.runtime.shared.score import scoring\n",
 "from matplotlib import pyplot as plt\n",
 "\n",
 "# use automl scoring module\n",
 "scores = scoring.score_regression(\n",
 " y_test=fcst_df[target_column_name],\n",
 " y_pred=fcst_df[\"predicted\"],\n",
 " metrics=list(constants.Metric.SCALAR_REGRESSION_SET),\n",
 ")\n",
 "\n",
 "print(\"[Test data scores]\\n\")\n",
 "for key, value in scores.items():\n",
 " print(\"{}: {:.3f}\".format(key, value))\n",
 "\n",
 "# Plot outputs\n",
 "%matplotlib inline\n",
 "test_pred = plt.scatter(fcst_df[target_column_name], fcst_df[\"predicted\"], color=\"b\")\n",
 "test_test = plt.scatter(\n",
 " fcst_df[target_column_name], fcst_df[target_column_name], color=\"g\"\n",
 ")\n",
 "plt.legend(\n",
 " (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n",
 ")\n",
 "plt.show()"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "# Advanced Training \n",
 "We did not use lags in the previous model specification. 
In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation."
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "### Using lags and rolling window features\n",
 "Now we will configure the target lags, that is the previous values of the target variables, meaning the prediction is no longer horizon-less. We therefore must still specify the `forecast_horizon` that the model will learn to forecast. The `target_lags` keyword specifies how far back we will construct the lags of the target variable, and the `target_rolling_window_size` specifies the size of the rolling window over which we will generate the `max`, `min` and `sum` features.\n",
 "\n",
 "This notebook uses the blocked_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blocked_models list but you may need to increase the iteration_timeout_minutes parameter value to get results."
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "advanced_forecasting_parameters = ForecastingParameters(\n",
 " time_column_name=time_column_name,\n",
 " forecast_horizon=forecast_horizon,\n",
 " target_lags=12,\n",
 " target_rolling_window_size=4,\n",
 " cv_step_size=\"auto\",\n",
 ")\n",
 "\n",
 "automl_config = AutoMLConfig(\n",
 " task=\"forecasting\",\n",
 " primary_metric=\"normalized_root_mean_squared_error\",\n",
 " blocked_models=[\n",
 " \"ElasticNet\",\n",
 " \"ExtremeRandomTrees\",\n",
 " \"GradientBoosting\",\n",
 " \"XGBoostRegressor\",\n",
 " \"ExtremeRandomTrees\",\n",
 " \"AutoArima\",\n",
 " \"Prophet\",\n",
 " ], # These models are blocked for tutorial purposes, remove this for real use cases.\n",
 " experiment_timeout_hours=0.3,\n",
 " training_data=train_dataset,\n",
 " label_column_name=target_column_name,\n",
 " compute_target=compute_target,\n",
 " enable_early_stopping=True,\n",
 " n_cross_validations=\"auto\", # Could be customized as an integer\n",
 " verbosity=logging.INFO,\n",
 " forecasting_parameters=advanced_forecasting_parameters,\n",
 ")"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "We now start a new remote run, this time with lag and rolling window featurization. AutoML applies featurizations in the setup stage, prior to iterating over ML models. The full training set is featurized first, followed by featurization of each of the CV splits. Lag and rolling window features introduce additional complexity, so the run will take longer than in the previous example that lacked these featurizations."
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "advanced_remote_run = experiment.submit(automl_config, show_output=False)"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "advanced_remote_run.wait_for_completion()"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "### Retrieve the Best Run details"
- ],
- "metadata": {}
+ ]
 },
 {
 "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
 "source": [
 "best_run_lags = advanced_remote_run.get_best_child()\n",
 "best_run_lags"
- ],
- "outputs": [],
- "execution_count": null,
- "metadata": {}
+ ]
 },
 {
 "cell_type": "markdown",
+ "metadata": {},
 "source": [
 "# Advanced Results\n",
 "We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. 
Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "test_experiment_advanced = Experiment(ws, experiment_name + \"_inference_advanced\")\n", "advanced_remote_run_infer = run_remote_inference(\n", @@ -741,23 +750,23 @@ "advanced_remote_run_infer.download_file(\n", " \"outputs/predictions.csv\", \"predictions_advanced.csv\"\n", ")" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "fcst_adv_df = pd.read_csv(\"predictions_advanced.csv\", parse_dates=[time_column_name])\n", "fcst_adv_df.head()" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from azureml.automl.core.shared import constants\n", "from azureml.automl.runtime.shared.score import scoring\n", @@ -786,10 +795,7 @@ " (test_pred, test_test), (\"prediction\", \"truth\"), loc=\"upper left\", fontsize=8\n", ")\n", "plt.show()" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] } ], "metadata": { @@ -802,40 +808,40 @@ "how-to-use-azureml", "automated-machine-learning" ], + "kernel_info": { + "name": "python3" + }, "kernelspec": { - "name": "python38-azureml", + "display_name": "Python 3.8 - AzureML", "language": "python", - "display_name": "Python 3.8 - AzureML" + "name": "python38-azureml" }, "language_info": { - "name": "python", - "version": "3.8.5", - "mimetype": "text/x-python", "codemirror_mode": { "name": "ipython", "version": 3 }, - "pygments_lexer": "ipython3", + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", "nbconvert_exporter": "python", - "file_extension": ".py" - }, - "vscode": { - "interpreter": { - "hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca" - } + "pygments_lexer": "ipython3", + "version": "3.8.10" }, "microsoft": { "ms_spell_check": { "ms_spell_check_language": "en" } }, - "kernel_info": { - "name": "python3" - }, "nteract": { "version": "nteract-front-end@1.0.0" + }, + "vscode": { + "interpreter": { + "hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca" + } } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } \ No newline at end of file diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/forecasting_script.py b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/forecasting_script.py index 40724de54..01c831b4c 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/forecasting_script.py +++ b/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/forecasting_script.py @@ -6,7 +6,7 @@ import argparse from azureml.core import Dataset, Run -from sklearn.externals import joblib +import joblib from pandas.tseries.frequencies import to_offset parser = argparse.ArgumentParser() diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-github-dau/auto-ml-forecasting-github-dau.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-github-dau/auto-ml-forecasting-github-dau.ipynb index 72d8b2074..79ab282c2 100644 --- 
a/how-to-use-azureml/automated-machine-learning/forecasting-github-dau/auto-ml-forecasting-github-dau.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-github-dau/auto-ml-forecasting-github-dau.ipynb @@ -19,7 +19,14 @@ "hidePrompt": false }, "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.png)" + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-github-dau/auto-ml-forecasting-github-dau.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "!Important!
This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-github-dau)).
" ] }, { @@ -382,7 +389,7 @@ "automl_config = AutoMLConfig(\n", " task=\"forecasting\",\n", " primary_metric=\"normalized_root_mean_squared_error\",\n", - " experiment_timeout_hours=1,\n", + " experiment_timeout_hours=1.5,\n", " training_data=train_dataset,\n", " label_column_name=target_column_name,\n", " validation_data=valid_dataset,\n", @@ -695,7 +702,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.9" + "version": "3.8.10" } }, "nbformat": 4, diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-github-dau/infer.py b/how-to-use-azureml/automated-machine-learning/forecasting-github-dau/infer.py index d351380d2..70505ba18 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-github-dau/infer.py +++ b/how-to-use-azureml/automated-machine-learning/forecasting-github-dau/infer.py @@ -4,7 +4,7 @@ import numpy as np import pandas as pd -from sklearn.externals import joblib +import joblib from sklearn.metrics import mean_absolute_error, mean_squared_error from azureml.automl.runtime.shared.score import scoring, constants diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.ipynb index cd10b3f54..55725f24c 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.ipynb @@ -16,6 +16,13 @@ "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "!Important!
This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/pipelines/1k_demand_forecasting_with_pipeline_components/automl-forecasting-demand-hierarchical-timeseries-in-pipeline)).
" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -666,7 +673,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.8.10" } }, "nbformat": 4, diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-many-models/auto-ml-forecasting-many-models.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-many-models/auto-ml-forecasting-many-models.ipynb index 4db94da22..660add29a 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-many-models/auto-ml-forecasting-many-models.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-many-models/auto-ml-forecasting-many-models.ipynb @@ -16,6 +16,13 @@ "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.png)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "!Important!
This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/pipelines/1k_demand_forecasting_with_pipeline_components/automl-forecasting-demand-many-models-in-pipeline)).
" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -306,7 +313,7 @@ "from azureml.core.compute import ComputeTarget, AmlCompute\n", "\n", "# Name your cluster\n", - "compute_name = \"mm-compute\"\n", + "compute_name = \"mm-compute-v1\"\n", "\n", "\n", "if compute_name in ws.compute_targets:\n", @@ -316,7 +323,7 @@ "else:\n", " print(\"Creating a new compute target...\")\n", " provisioning_config = AmlCompute.provisioning_configuration(\n", - " vm_size=\"STANDARD_D16S_V3\", max_nodes=20\n", + " vm_size=\"STANDARD_D14_V2\", max_nodes=20\n", " )\n", " # Create the compute target\n", " compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)\n", @@ -878,7 +885,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.5" + "version": "3.8.10" }, "vscode": { "interpreter": { diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb index c50cee1bd..4cc18f54f 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb @@ -1,7 +1,6 @@ { "cells": [ { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -11,7 +10,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -19,7 +17,13 @@ ] }, { - "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "!Important!
This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-orange-juice-sales)).
" + ] + }, + { "cell_type": "markdown", "metadata": {}, "source": [ @@ -37,7 +41,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -50,7 +53,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -75,7 +77,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -92,7 +93,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -126,7 +126,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -166,7 +165,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -190,7 +188,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -211,7 +208,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -231,7 +227,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -254,7 +249,9 @@ " time_series_id_column_names, group_keys=False\n", " )\n", " df_head = df_grouped.apply(lambda dfg: dfg.iloc[:-n])\n", + " df_head.reset_index(inplace=True, drop=True)\n", " df_tail = df_grouped.apply(lambda dfg: dfg.iloc[-n:])\n", + " df_tail.reset_index(inplace=True, drop=True)\n", " return df_head, df_tail\n", "\n", "\n", @@ -262,7 +259,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -288,7 +284,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -305,7 +300,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -333,7 +327,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -372,7 +365,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -390,7 +382,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -464,7 +455,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -491,7 +481,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -511,7 +500,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -549,7 +537,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -570,7 +557,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -579,7 +565,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -608,7 +593,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -664,7 +648,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -672,7 +655,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -695,7 +677,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -715,7 +696,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -761,7 +741,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -810,7 +789,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -866,7 +844,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.5" + "version": "3.8.10" }, "tags": [ "None" diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/forecasting_script.py 
b/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/forecasting_script.py index 40724de54..01c831b4c 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/forecasting_script.py +++ b/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/forecasting_script.py @@ -6,7 +6,7 @@ import argparse from azureml.core import Dataset, Run -from sklearn.externals import joblib +import joblib from pandas.tseries.frequencies import to_offset parser = argparse.ArgumentParser() diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/auto-ml-forecasting-pipelines.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/auto-ml-forecasting-pipelines.ipynb index 50b67db0f..faef88ccb 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/auto-ml-forecasting-pipelines.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/auto-ml-forecasting-pipelines.ipynb @@ -1,5 +1,21 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "!Important!
This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/pipelines/1h_automl_in_pipeline/automl-forecasting-in-pipeline)).
\n", + "
\n", + "
\n", + "\n", + "For examples illustrating how to build pipelines with components, please use the following links:\n", + "" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -555,40 +571,15 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.core import Model\n", + "from azureml.train.automl.run import AutoMLRun\n", "\n", - "model = Model(ws, model_name_str)\n", - "download_path = model.download(model_name_str, exist_ok=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "After all the files are downloaded, we can generate the run config for inference runs." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.core import Environment, RunConfiguration\n", - "from azureml.core.conda_dependencies import CondaDependencies\n", + "for step in training_pipeline_run.get_steps():\n", + " if step.properties.get(\"StepType\") == \"AutoMLStep\":\n", + " automl_run = AutoMLRun(experiment, step.id)\n", + " break\n", "\n", - "env_file = os.path.join(download_path, \"conda_env_v_1_0_0.yml\")\n", - "inference_env = Environment(\"oj-inference-env\")\n", - "inference_env.python.conda_dependencies = CondaDependencies(\n", - " conda_dependencies_file_path=env_file\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "[Optional] The enviroment can also be assessed from the training run using `get_environment()` API." + "best_run = automl_run.get_best_child()\n", + "inference_env = best_run.get_environment()" ] }, { diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/scripts/infer.py b/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/scripts/infer.py index a900d32ff..83e561050 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/scripts/infer.py +++ b/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/scripts/infer.py @@ -6,7 +6,7 @@ import pandas as pd from pandas.tseries.frequencies import to_offset -from sklearn.externals import joblib +import joblib from sklearn.metrics import mean_absolute_error, mean_squared_error from azureml.data.dataset_factory import TabularDatasetFactory @@ -30,7 +30,7 @@ def infer_forecasting_dataset_tcn( run = Run.get_context() - registered_train = TabularDatasetFactory.register_pandas_dataframe( + TabularDatasetFactory.register_pandas_dataframe( df_all, target=( run.experiment.workspace.get_default_datastore(), diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-experiment-settings.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-experiment-settings.ipynb index c83731a08..391cc7e9e 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-experiment-settings.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-experiment-settings.ipynb @@ -2,22 +2,30 @@ "cells": [ { "cell_type": "markdown", + "metadata": {}, "source": [ "Copyright (c) Microsoft Corporation. All rights reserved.\n", "\n", "Licensed under the MIT License." 
- ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/1_determine_experiment_settings.png)" - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, + "source": [ + "!Important!
This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-recipes-univariate)).
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, "source": [ "In this notebook we will explore the univariate time-series data to determine the settings for an automated ML experiment. We will follow the thought process depicted in the following diagram:
\n", "![Forecasting after training](figures/univariate_settings_map_20210408.jpg)\n", @@ -40,11 +48,13 @@ "\n", "\n", "The answers to these questions will help determine the appropriate settings for the automated ML experiment.\n" - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import os\n", "import warnings\n", @@ -65,13 +75,13 @@ "# set printing options\n", "pd.set_option(\"display.max_columns\", 500)\n", "pd.set_option(\"display.width\", 1000)" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# load data\n", "main_data_loc = \"data\"\n", @@ -86,13 +96,13 @@ "df.sort_values(by=TIME_COLNAME, inplace=True)\n", "df.set_index(TIME_COLNAME, inplace=True)\n", "df.head(2)" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# plot the entire dataset\n", "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n", @@ -100,20 +110,20 @@ "ax.title.set_text(\"Original Data Series\")\n", "locs, labels = plt.xticks()\n", "plt.xticks(rotation=45)" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "The graph plots the alcohol sales in the United States. Because the data is trending, it can be difficult to see cycles, seasonality or other interesting behaviors due to the scaling issues. For example, if there is a seasonal pattern, which we will discuss later, we cannot see them on the trending data. In such case, it is worth plotting the same data in first differences." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# plot the entire dataset in first differences\n", "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n", @@ -121,20 +131,18 @@ "ax.title.set_text(\"Data in first differences\")\n", "locs, labels = plt.xticks()\n", "plt.xticks(rotation=45)" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "In the previous plot we observe that the data is more volatile towards the end of the series. This period coincides with the Covid-19 period, so we will exclude it from our experiment. Since in this example there are no user-provided features it is hard to make an argument that a model trained on the less volatile pre-covid data will be able to accurately predict the covid period." - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "# 1. Seasonality\n", "\n", @@ -143,11 +151,13 @@ "2. If it's seasonal, does the data exhibit a trend (up or down)?\n", "\n", "It is hard to visually detect seasonality when the data is trending. The reason being is scale of seasonal fluctuations is dwarfed by the range of the trend in the data. One way to deal with this is to de-trend the data by taking the first differences. We will discuss this in more detail in the next section." 
- ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# plot the entire dataset in first differences\n", "fig, ax = plt.subplots(figsize=(6, 2), dpi=180)\n", @@ -155,20 +165,20 @@ "ax.title.set_text(\"Data in first differences\")\n", "locs, labels = plt.xticks()\n", "plt.xticks(rotation=45)" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "For the next plot, we will exclude the Covid period again. We will also shorten the length of data because plotting a very long time series may prevent us from seeing seasonal patterns, if there are any, because the plot may look like a random walk." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# remove COVID period\n", "df = df[:COVID_PERIOD_START]\n", @@ -179,13 +189,11 @@ "ax.title.set_text(\"Data in first differences\")\n", "locs, labels = plt.xticks()\n", "plt.xticks(rotation=45)" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "

Conclusion

\n", "\n", @@ -204,11 +212,11 @@ "
  • In the first case, by taking first differences we remove the stochastic trend, but we do not remove seasonal patterns. In the second case, we do not remove the stochastic trend, and it can be captured by the trend component of the STL decomposition. It is hard to say in advance which option will work best in your case; hence, you will need to run both options to see which one results in more accurate forecasts.
  • \n", " \n", "" - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "# 2. Stationarity\n", "If the data does not exhibit seasonal patterns, we would like to see if the data is non-stationary. Particularly, we want to see if there is a clear trending behavior. If such behavior is observed, we would like to first difference the data and examine the plot of an auto-correlation function (ACF) known as correlogram. If the data is seasonal, differencing it will not get rid off the seasonality and this will be shown on the correlogram as well.\n", @@ -236,11 +244,13 @@ "\n", "\n", "To answer the first question, we run a series of tests (we call them unit root tests)." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# unit root tests\n", "test = unit_root_test_wrapper(df[TARGET_COLNAME])\n", @@ -248,13 +258,11 @@ "print(\"Summary table\", \"\\n\", test[\"summary\"], \"\\n\")\n", "print(\"Is the {} series stationary?: {}\".format(TARGET_COLNAME, test[\"stationary\"]))\n", "print(\"---------------\", \"\\n\")" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "In the previous cell, we ran a series of unit root tests. The summary table contains the following columns:\n", "\n", "" - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "# Appendix: ACF, PACF and Lag Selection\n", "To do this, we will examine the ACF and partial ACF (PACF) plots of the differenced series. \n", @@ -424,11 +432,11 @@ "
  • In the interpretation posted above, we need to be careful not to confuse the word "leads" with "causes", since these are not the same thing. We do not know that the lagged value of the variable causes it to change. After all, there are probably many other features that may explain the movement in $Y_t$. All we are trying to do in this section is identify situations where the variable contains strong auto-regressive components that need to be included in the model to improve forecast accuracy.
  • \n", " \n", "" - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "\n", "" - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "\n", " " - ], - "metadata": {} + ] } ], "metadata": { @@ -471,31 +478,31 @@ "name": "vlbejan" } ], + "kernel_info": { + "name": "python38-azureml" + }, "kernelspec": { - "name": "python38-azureml", + "display_name": "Python 3.8 - AzureML", "language": "python", - "display_name": "Python 3.8 - AzureML" + "name": "python38-azureml" }, "language_info": { - "name": "python", - "version": "3.8.10", - "mimetype": "text/x-python", "codemirror_mode": { "name": "ipython", "version": 3 }, - "pygments_lexer": "ipython3", + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", "nbconvert_exporter": "python", - "file_extension": ".py" + "pygments_lexer": "ipython3", + "version": "3.8.10" }, "microsoft": { "ms_spell_check": { "ms_spell_check_language": "en" } }, - "kernel_info": { - "name": "python38-azureml" - }, "nteract": { "version": "nteract-front-end@1.0.0" } diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-run-experiment.ipynb b/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-run-experiment.ipynb index 6d70f388d..bb5e1494d 100644 --- a/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-run-experiment.ipynb +++ b/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-run-experiment.ipynb @@ -2,22 +2,30 @@ "cells": [ { "cell_type": "markdown", + "metadata": {}, "source": [ "Copyright (c) Microsoft Corporation. All rights reserved.\n", "\n", "Licensed under the MIT License." - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/2_run_experiment.png)" - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, + "source": [ + "!Important!
    This notebook is outdated and is not supported by the AutoML Team. Please use the supported version ([link](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-recipes-univariate)).
    " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, "source": [ "# Running AutoML experiments\n", "\n", @@ -26,18 +34,20 @@ "
    \n", "\n", "The output generated by this notebook is saved in the `experiment_output`folder." - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Setup" - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import os\n", "import logging\n", @@ -60,21 +70,21 @@ "np.set_printoptions(precision=4, suppress=True, linewidth=100)\n", "pd.set_option(\"display.max_columns\", 500)\n", "pd.set_option(\"display.width\", 1000)" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "As part of the setup you have already created a **Workspace**. You will also need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n", "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "ws = Workspace.from_config()\n", "amlcompute_cluster_name = \"recipe-cluster\"\n", @@ -104,22 +114,22 @@ "compute_target.wait_for_completion(\n", " show_output=True, min_node_count=None, timeout_in_minutes=20\n", ")" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Data\n", "\n", "Here, we will load the data from the csv file and drop the Covid period." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "main_data_loc = \"data\"\n", "train_file_name = \"S4248SM144SCEN.csv\"\n", @@ -137,34 +147,32 @@ "\n", "# remove the Covid period\n", "df = df.query('{} <= \"{}\"'.format(TIME_COLNAME, COVID_PERIOD_START))" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Set parameters\n", "\n", "The first set of parameters is based on the analysis performed in the `auto-ml-forecasting-univariate-recipe-experiment-settings` notebook. " - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# set parameters based on the settings notebook analysis\n", "DIFFERENCE_SERIES = True\n", "TARGET_LAGS = None\n", "STL_TYPE = None" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "Next, define additional parameters to be used in the AutoML config class.\n", "\n", @@ -179,30 +187,32 @@ " \n", " \n", "\n" - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# set other parameters\n", "FORECAST_HORIZON = 12\n", "TIME_SERIES_ID_COLNAMES = []\n", "BLOCKED_MODELS = []" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "To run AutoML, you also need to create an **Experiment**. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem." 
- ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# choose a name for the run history container in the workspace\n", "if isinstance(TARGET_LAGS, list):\n", @@ -229,38 +239,38 @@ "pd.set_option(\"display.max_colwidth\", None)\n", "outputDf = pd.DataFrame(data=output, index=[\"\"])\n", "print(outputDf.T)" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# create output directory\n", "output_dir = \"experiment_output/{}\".format(experiment_desc)\n", "if not os.path.exists(output_dir):\n", " os.makedirs(output_dir)" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# difference data and test for unit root\n", "if DIFFERENCE_SERIES:\n", " df_delta = df.copy()\n", " df_delta[TARGET_COLNAME] = df[TARGET_COLNAME].diff()\n", " df_delta.dropna(axis=0, inplace=True)" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# split the data into train and test set\n", "if DIFFERENCE_SERIES:\n", @@ -278,21 +288,21 @@ " time_colname=TIME_COLNAME,\n", " ts_id_colnames=TIME_SERIES_ID_COLNAMES,\n", " )" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Upload files to the Datastore\n", "The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace) is paired with the storage account, which contains the default data store. We will use it to upload the bike share data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "df_train.to_csv(\"train.csv\", index=False)\n", "df_test.to_csv(\"test.csv\", index=False)\n", @@ -309,20 +319,20 @@ "\n", "# print the first 5 rows of the Dataset\n", "train_dataset.to_pandas_dataframe().reset_index(drop=True).head(5)" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Config AutoML" - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "time_series_settings = {\n", " \"time_column_name\": TIME_COLNAME,\n", @@ -349,76 +359,76 @@ " compute_target=compute_target,\n", " **time_series_settings,\n", ")" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "We will now run the experiment, you can go to Azure ML portal to view the run details." 
- ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "remote_run = experiment.submit(automl_config, show_output=False)\n", "remote_run.wait_for_completion()" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Retrieve the Best Run details\n", "Below we retrieve the best Run object from among all the runs in the experiment." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "best_run = remote_run.get_best_child()\n", "best_run" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Inference\n", "\n", "We now use the best fitted model from the AutoML Run to make forecasts for the test set. We will do batch scoring on the test dataset which should have the same schema as training dataset.\n", "\n", "The inference will run on a remote compute. In this example, it will re-use the training compute." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "test_experiment = Experiment(ws, experiment_name + \"_inference\")" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "## Retreiving forecasts from the model\n", "We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute." - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from run_forecast import run_remote_inference\n", "\n", @@ -432,31 +442,31 @@ "remote_run.wait_for_completion(show_output=False)\n", "\n", "remote_run.download_file(\"outputs/predictions.csv\", f\"{output_dir}/predictions.csv\")" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Download the prediction result for metrics calcuation\n", "The test data with predictions are saved in artifact `outputs/predictions.csv`. We will use it to calculate accuracy metrics and vizualize predictions versus actuals." 
- ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "X_trans = pd.read_csv(f\"{output_dir}/predictions.csv\", parse_dates=[TIME_COLNAME])\n", "X_trans.head()" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# convert forecast in differences to levels\n", "def convert_fcst_diff_to_levels(fcst, yt, df_orig):\n", @@ -470,13 +480,13 @@ " )\n", " out.rename(columns={TARGET_COLNAME: \"actual_level\"}, inplace=True)\n", " return out" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "if DIFFERENCE_SERIES:\n", " # convert forecast in differences to the levels\n", @@ -490,20 +500,20 @@ " fcst_df[\"predicted_level\"] = y_predictions\n", "\n", "del X_trans" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Calculate metrics and save output" - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# compute metrics\n", "metrics_df = compute_metrics(fcst_df=fcst_df, metric_name=None, ts_id_colnames=None)\n", @@ -514,20 +524,20 @@ "\n", "metrics_df.to_csv(os.path.join(output_dir, metrics_file_name), index=True)\n", "fcst_df.to_csv(os.path.join(output_dir, fcst_file_name), index=True)" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Generate and save visuals" - ], - "metadata": {} + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "plot_df = df.query('{} > \"2010-01-01\"'.format(TIME_COLNAME))\n", "plot_df.set_index(TIME_COLNAME, inplace=True)\n", @@ -546,10 +556,7 @@ "\n", "plt.setp(labels, rotation=45)\n", "plt.savefig(os.path.join(output_dir, plot_file_name))" - ], - "outputs": [], - "execution_count": null, - "metadata": {} + ] } ], "metadata": { @@ -558,38 +565,38 @@ "name": "vlbejan" } ], + "kernel_info": { + "name": "python3" + }, "kernelspec": { - "name": "python38-azureml", + "display_name": "Python 3.8 - AzureML", "language": "python", - "display_name": "Python 3.8 - AzureML" + "name": "python38-azureml" }, "language_info": { - "name": "python", - "version": "3.8.5", - "mimetype": "text/x-python", "codemirror_mode": { "name": "ipython", "version": 3 }, - "pygments_lexer": "ipython3", + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", "nbconvert_exporter": "python", - "file_extension": ".py" - }, - "vscode": { - "interpreter": { - "hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca" - } + "pygments_lexer": "ipython3", + "version": "3.8.10" }, "microsoft": { "ms_spell_check": { "ms_spell_check_language": "en" } }, - "kernel_info": { - "name": "python3" - }, "nteract": { "version": "nteract-front-end@1.0.0" + }, + "vscode": { + "interpreter": { + "hash": "6bd77c88278e012ef31757c15997a7bea8c943977c43d6909403c00ae11d43ca" + } } }, "nbformat": 4, diff --git a/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/forecasting_script.py b/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/forecasting_script.py index ca8f2f19d..a6506bead 100644 --- 
a/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/forecasting_script.py +++ b/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/forecasting_script.py @@ -7,7 +7,7 @@ import argparse from azureml.core import Dataset, Run from azureml.automl.core.shared.constants import TimeSeriesInternal -from sklearn.externals import joblib +import joblib parser = argparse.ArgumentParser() parser.add_argument( diff --git a/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb b/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb index 260e7793c..c912fe153 100644 --- a/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb +++ b/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb @@ -1,5 +1,27 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": { + "hideCode": false, + "hidePrompt": false + }, + "source": [ + "Copyright (c) Microsoft Corporation. All rights reserved.\n", + "\n", + "Licensed under the MIT License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hideCode": false, + "hidePrompt": false + }, + "source": [ + "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.png)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -495,6 +517,30 @@ "#### Create conda configuration for model explanations experiment from automl_run object" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "from azureml.core import Environment\n", + "\n", + "\n", + "def get_environment_safe(parent_run):\n", + " \"\"\"Get the environment from parent run\"\"\"\n", + " try:\n", + " return parent_run.get_environment()\n", + " except BaseException:\n", + " run_details = parent_run.get_details()\n", + " run_def = run_details.get(\"runDefinition\")\n", + " env = run_def.get(\"environment\")\n", + " if env is None:\n", + " raise\n", + " json.dump(env, open(\"azureml_environment.json\", \"w\"))\n", + " return Environment.load_from_directory(\".\")" + ] + }, { "cell_type": "code", "execution_count": null, @@ -502,8 +548,6 @@ "outputs": [], "source": [ "from azureml.core.runconfig import RunConfiguration\n", - "from azureml.core.conda_dependencies import CondaDependencies\n", - "import pkg_resources\n", "\n", "# create a new RunConfig object\n", "conda_run_config = RunConfiguration(framework=\"python\")\n", @@ -513,7 +557,7 @@ "conda_run_config.environment.docker.enabled = True\n", "\n", "# specify CondaDependencies obj\n", - "conda_run_config.environment = automl_run.get_environment()" + "conda_run_config.environment = get_environment_safe(automl_run)" ] }, { @@ -686,7 +730,7 @@ " description=\"Get local explanations for Machine test data\",\n", ")\n", "\n", - "myenv = automl_run.get_environment()\n", + "myenv = get_environment_safe(automl_run)\n", "inference_config = InferenceConfig(entry_script=\"score_explain.py\", environment=myenv)\n", "\n", "# Use configs and models generated above\n", @@ -909,7 +953,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": 
"ipython3", - "version": "3.6.7" + "version": "3.8.7" }, "tags": [ "featurization", diff --git a/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb b/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb index 2dbfa989c..2db729769 100644 --- a/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb +++ b/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb @@ -123,7 +123,7 @@ "import pickle\n", "import json\n", "import numpy\n", - "from sklearn.externals import joblib\n", + "import joblib\n", "from sklearn.linear_model import Ridge\n", "import time\n", "\n", diff --git a/how-to-use-azureml/deployment/onnx/onnx-train-pytorch-aml-deploy-mnist.ipynb b/how-to-use-azureml/deployment/onnx/onnx-train-pytorch-aml-deploy-mnist.ipynb index 006936462..46697ace1 100644 --- a/how-to-use-azureml/deployment/onnx/onnx-train-pytorch-aml-deploy-mnist.ipynb +++ b/how-to-use-azureml/deployment/onnx/onnx-train-pytorch-aml-deploy-mnist.ipynb @@ -105,7 +105,7 @@ " print('Found existing compute target.')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', \n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_NC6s_v3', \n", " max_nodes=6)\n", "\n", " # create the cluster\n", @@ -620,7 +620,7 @@ }, "manual": null }, - "vm_size": "STANDARD_NC6" + "vm_size": "Standard_NC6s_v3" }, "error": "", "layout": "IPY_MODEL_c899ddfc2b134ca9b89a4f278ac7c997", diff --git a/how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.ipynb b/how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.ipynb index 1bda9522c..292db2a6a 100644 --- a/how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.ipynb +++ b/how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.ipynb @@ -136,6 +136,9 @@ "# Choose a name for your GPU cluster\n", "gpu_cluster_name = \"aks-gpu-cluster\"\n", "\n", + "# Choose a location for your GPU cluster\n", + "gpu_cluster_location = \"eastus\"\n", + "\n", "# Verify that cluster does not exist already\n", "try:\n", " gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)\n", @@ -146,7 +149,8 @@ " # Specify the configuration for the new cluster\n", " compute_config = AksCompute.provisioning_configuration(cluster_purpose=AksCompute.ClusterPurpose.DEV_TEST,\n", " agent_count=1,\n", - " vm_size=\"Standard_NV6\")\n", + " vm_size=\"Standard_NC6s_v3\",\n", + " location=gpu_cluster_location)\n", " # Create the cluster with the specified name and configuration\n", " gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, compute_config)\n", "\n", @@ -170,7 +174,7 @@ "outputs": [], "source": [ "%%writefile score.py\n", - "import tensorflow as tf\n", + "import tensorflow.compat.v1 as tf\n", "import numpy as np\n", "import json\n", "import os\n", @@ -240,9 +244,9 @@ "# Please see [Azure ML Containers repository](https://github.com/Azure/AzureML-Containers#featured-tags)\n", "# for open-sourced GPU base images.\n", "env.docker.base_image = DEFAULT_GPU_IMAGE\n", - "env.python.conda_dependencies = CondaDependencies.create(python_version=\"3.6.2\", pin_sdk_version=False,\n", - " 
conda_packages=['tensorflow-gpu==1.12.0','numpy'],\n", - " pip_packages=['azureml-contrib-services==1.47.0', 'azureml-defaults==1.47.0'])\n", + "env.python.conda_dependencies = CondaDependencies.create(python_version=\"3.8\", pin_sdk_version=False,\n", + " conda_packages=['tensorflow-gpu','numpy'],\n", + " pip_packages=['azureml-contrib-services', 'azureml-defaults'])\n", "\n", "inference_config = InferenceConfig(entry_script=\"score.py\", environment=env)\n", "aks_config = AksWebservice.deploy_configuration()\n", diff --git a/how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.yml b/how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.yml index c2afb644b..48e39a5ae 100644 --- a/how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.yml +++ b/how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.yml @@ -2,4 +2,3 @@ name: production-deploy-to-aks-gpu dependencies: - pip: - azureml-sdk - - tensorflow diff --git a/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks-ssl.ipynb b/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks-ssl.ipynb index 2cb8fe0fd..81bf460a7 100644 --- a/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks-ssl.ipynb +++ b/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks-ssl.ipynb @@ -154,7 +154,7 @@ "import pickle\n", "import json\n", "import numpy\n", - "from sklearn.externals import joblib\n", + "import joblib\n", "from sklearn.linear_model import Ridge\n", "from inference_schema.schema_decorators import input_schema, output_schema\n", "from inference_schema.parameter_types.standard_py_parameter_type import StandardPythonParameterType\n", diff --git a/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb b/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb index 63b89c4e9..6b2319186 100644 --- a/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb +++ b/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb @@ -154,7 +154,7 @@ "import pickle\n", "import json\n", "import numpy\n", - "from sklearn.externals import joblib\n", + "import joblib\n", "from sklearn.linear_model import Ridge\n", "\n", "def init():\n", diff --git a/how-to-use-azureml/deployment/spark/iris.model/data/_SUCCESS b/how-to-use-azureml/deployment/spark/iris.model/data/_SUCCESS deleted file mode 100644 index e69de29bb..000000000 diff --git a/how-to-use-azureml/deployment/spark/iris.model/data/part-00000-dabcf097-2b45-4b28-bbca-6c17889ddcbf-c000.snappy.parquet b/how-to-use-azureml/deployment/spark/iris.model/data/part-00000-dabcf097-2b45-4b28-bbca-6c17889ddcbf-c000.snappy.parquet deleted file mode 100644 index 8f17afc95530e57737b675574398f8eb1f3aa5e5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4224 zcmcgwU1%d!6rP!+lQB(qQJi7ohNT1>T8JU(-_{yzrHgcnt97j+h$}af+d4X#NoOWq zYbi((eDI}jg5rvA#f8;31s}4A^r7lo#Xk^P*1}4~Rm=A_o3c*3@|HT) zkeT8i%knG;vpky;xsSiT&ORXt<05zdz3!)8x|q5GCN$ZFJ9=E;pPpH(Ut0a@ z7OnkWpMOpHKeQi|K-nm|Kyz$6JJKgKkU8s z`jZd;R(A~n8+WCVNYn5GOj*CdgqR&@ng&URXMOb$l%y$|XOg#1OeTyf(qDeTkY2GVv9bPOhFO=DUcQ! 
z)r0W|(!)^!XV1#><80CdXov-adV?uoh2DZx1wFG#17@Qi2D+mkbDe2{NixX791)6{ zb*2cd(XYD_u^eNm1v3fpcL?d%Io%F6@ZA;4{=)mF?4W0(fF zRD<+00tw_+EEvekcFmALhQWO0+#K&R7sR^>cur!3@%Bg!QTUb-eq=H?p+Y&gyYD+t z{Hd|o3FYflTKL*s{lg-BTA0afb`}E@!H>9>Ac$5k+1+crqF76`&=)>R2&DMLK24v? zp(=ubk+Wt;&X`wnFhu5S*vvt@%vG?7%E(0G)x=Vo421M(+A#H+WTvy}Ss5#3BR%@u zQd&h7azxs)J!|0D5hvqi4(qU?))WPm6cC(6ih;9f7d<_?w7i`4qqqa=Fff6+=0SOm z)XKyQi5LvFhq$OHIYm~HGMBRl}>Z_?^!~YWcWqX;tVg5;m~yq_H0}`zMWG?&y-nc}{u$`QqAc-Z^iu z!$3GsHxJ@yaO~FWojn>gVNb&4?_5PQuC~?Y}Jr7j$ z?ggwHvZi(ya)o>@-`!iP$x699KTtr!!$X7l{DXL)G}52%&*uyMs4y}(FenX;^q0z~ Z2BlL2s4uylqNp~1sPQg}nuh;9{0kvDFQfnf diff --git a/how-to-use-azureml/deployment/spark/iris.model/metadata/_SUCCESS b/how-to-use-azureml/deployment/spark/iris.model/metadata/_SUCCESS deleted file mode 100644 index e69de29bb..000000000 diff --git a/how-to-use-azureml/deployment/spark/iris.model/metadata/part-00000 b/how-to-use-azureml/deployment/spark/iris.model/metadata/part-00000 deleted file mode 100644 index 312ecb55e..000000000 --- a/how-to-use-azureml/deployment/spark/iris.model/metadata/part-00000 +++ /dev/null @@ -1 +0,0 @@ -{"class":"org.apache.spark.ml.classification.LogisticRegressionModel","timestamp":1570147252329,"sparkVersion":"2.4.0","uid":"LogisticRegression_5df3978caaf3","paramMap":{"regParam":0.01},"defaultParamMap":{"aggregationDepth":2,"threshold":0.5,"rawPredictionCol":"rawPrediction","featuresCol":"features","labelCol":"label","predictionCol":"prediction","family":"auto","regParam":0.0,"tol":1.0E-6,"probabilityCol":"probability","standardization":true,"elasticNetParam":0.0,"maxIter":100,"fitIntercept":true}} diff --git a/how-to-use-azureml/deployment/spark/model-register-and-deploy-spark.ipynb b/how-to-use-azureml/deployment/spark/model-register-and-deploy-spark.ipynb deleted file mode 100644 index 71a96a743..000000000 --- a/how-to-use-azureml/deployment/spark/model-register-and-deploy-spark.ipynb +++ /dev/null @@ -1,349 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Copyright (c) Microsoft Corporation. All rights reserved.\n", - "\n", - "Licensed under the MIT License." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/deployment/deploy-to-cloud/model-register-and-deploy.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Register Spark Model and deploy as Webservice\n", - "\n", - "This example shows how to deploy a Webservice in step-by-step fashion:\n", - "\n", - " 1. Register Spark Model\n", - " 2. Deploy Spark Model as Webservice" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Prerequisites\n", - "If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration](../../../configuration.ipynb) Notebook first if you haven't." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Check core SDK version number\n", - "import azureml.core\n", - "\n", - "print(\"SDK version:\", azureml.core.VERSION)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Initialize Workspace\n", - "\n", - "Initialize a workspace object from persisted configuration." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "create workspace" - ] - }, - "outputs": [], - "source": [ - "from azureml.core import Workspace\n", - "\n", - "ws = Workspace.from_config()\n", - "print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\\n')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Register Model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can add tags and descriptions to your Models. Note you need to have a `iris.model` file in the current directory. This model file is generated using [train in spark](../training/train-in-spark/train-in-spark.ipynb) notebook. The below call registers that file as a Model with the same name `iris.model` in the workspace.\n", - "\n", - "Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "register model from file" - ] - }, - "outputs": [], - "source": [ - "from azureml.core.model import Model\n", - "\n", - "model = Model.register(model_path=\"iris.model\",\n", - " model_name=\"iris.model\",\n", - " tags={'type': \"regression\"},\n", - " description=\"Logistic regression model to predict iris species\",\n", - " workspace=ws)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Fetch Environment" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can now create and/or use an Environment object when deploying a Webservice. The Environment can have been previously registered with your Workspace, or it will be registered with it as a part of the Webservice deployment.\n", - "\n", - "More information can be found in our [using environments notebook](../training/using-environments/using-environments.ipynb)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.core import Environment\r\n", - "from azureml.core.environment import SparkPackage\r\n", - "from azureml.core.conda_dependencies import CondaDependencies\r\n", - "\r\n", - "myenv = Environment('my-pyspark-environment')\r\n", - "myenv.docker.base_image = \"mcr.microsoft.com/mmlspark/release:0.15\"\r\n", - "myenv.inferencing_stack_version = \"latest\"\r\n", - "myenv.python.conda_dependencies = CondaDependencies.create(pip_packages=[\"azureml-core\",\"azureml-defaults\",\"azureml-telemetry\",\"azureml-train-restclients-hyperdrive\",\"azureml-train-core\"], python_version=\"3.7.0\")\r\n", - "myenv.python.conda_dependencies.add_channel(\"conda-forge\")\r\n", - "myenv.spark.packages = [SparkPackage(\"com.microsoft.ml.spark\", \"mmlspark_2.11\", \"0.15\"), SparkPackage(\"com.microsoft.azure\", \"azure-storage\", \"2.0.0\"), SparkPackage(\"org.apache.hadoop\", \"hadoop-azure\", \"2.7.0\")]\r\n", - "myenv.spark.repositories = [\"https://mmlspark.azureedge.net/maven\"]\r\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Create Inference Configuration\n", - "\n", - "There is now support for a source directory, you can upload an entire folder from your local machine as dependencies for the Webservice.\n", - "Note: in that case, your entry_script is relative path to the source_directory path.\n", - "\n", - "Sample code for using a source directory:\n", - "\n", - "```python\n", - "inference_config = InferenceConfig(source_directory=\"C:/abc\",\n", - " entry_script=\"x/y/score.py\",\n", - " environment=environment)\n", - "```\n", - "\n", - " - source_directory = holds source path as string, this entire folder gets added in image so its really easy to access any files within this folder or subfolder\n", - " - entry_script = contains logic specific to initializing your model and running predictions\n", - " - environment = An environment object to use for the deployment. Doesn't have to be registered" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "create image" - ] - }, - "outputs": [], - "source": [ - "from azureml.core.model import InferenceConfig\n", - "\n", - "inference_config = InferenceConfig(entry_script=\"score.py\", environment=myenv)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Deploy Model as Webservice on Azure Container Instance\n", - "\n", - "Note that the service creation can take few minutes." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "azuremlexception-remarks-sample" - ] - }, - "outputs": [], - "source": [ - "from azureml.core.webservice import AciWebservice, Webservice\n", - "from azureml.exceptions import WebserviceException\n", - "\n", - "deployment_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)\n", - "aci_service_name = 'aciservice1'\n", - "\n", - "try:\n", - " # if you want to get existing service below is the command\n", - " # since aci name needs to be unique in subscription deleting existing aci if any\n", - " # we use aci_service_name to create azure aci\n", - " service = Webservice(ws, name=aci_service_name)\n", - " if service:\n", - " service.delete()\n", - "except WebserviceException as e:\n", - " print()\n", - "\n", - "service = Model.deploy(ws, aci_service_name, [model], inference_config, deployment_config)\n", - "\n", - "service.wait_for_deployment(True)\n", - "print(service.state)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Test web service" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "test_sample = json.dumps({'features':{'type':1,'values':[4.3,3.0,1.1,0.1]},'label':2.0})\n", - "\n", - "test_sample_encoded = bytes(test_sample, encoding='utf8')\n", - "prediction = service.run(input_data=test_sample_encoded)\n", - "print(prediction)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Delete ACI to clean up" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "deploy service", - "aci" - ] - }, - "outputs": [], - "source": [ - "service.delete()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Model Profiling\n", - "\n", - "You can also take advantage of the profiling feature to estimate CPU and memory requirements for models.\n", - "\n", - "```python\n", - "profile = Model.profile(ws, \"profilename\", [model], inference_config, test_sample)\n", - "profile.wait_for_profiling(True)\n", - "profiling_results = profile.get_results()\n", - "print(profiling_results)\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Model Packaging\n", - "\n", - "If you want to build a Docker image that encapsulates your model and its dependencies, you can use the model packaging option. 
The output image will be pushed to your workspace's ACR.\n", - "\n", - "You must include an Environment object in your inference configuration to use `Model.package()`.\n", - "\n", - "```python\n", - "package = Model.package(ws, [model], inference_config)\n", - "package.wait_for_creation(show_output=True) # Or show_output=False to hide the Docker build logs.\n", - "package.pull()\n", - "```\n", - "\n", - "Instead of a fully-built image, you can also generate a Dockerfile and download all the assets needed to build an image on top of your Environment.\n", - "\n", - "```python\n", - "package = Model.package(ws, [model], inference_config, generate_dockerfile=True)\n", - "package.wait_for_creation(show_output=True)\n", - "package.save(\"./local_context_dir\")\n", - "```" - ] - } - ], - "metadata": { - "authors": [ - { - "name": "vaidyas" - } - ], - "category": "deployment", - "compute": [ - "None" - ], - "datasets": [ - "Iris" - ], - "deployment": [ - "Azure Container Instance" - ], - "exclude_from_index": false, - "framework": [ - "PySpark" - ], - "friendly_name": "Register Spark model and deploy as webservice", - "kernelspec": { - "display_name": "Python 3.8 - AzureML", - "language": "python", - "name": "python38-azureml" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} \ No newline at end of file diff --git a/how-to-use-azureml/deployment/spark/model-register-and-deploy-spark.yml b/how-to-use-azureml/deployment/spark/model-register-and-deploy-spark.yml deleted file mode 100644 index 8414fbb0d..000000000 --- a/how-to-use-azureml/deployment/spark/model-register-and-deploy-spark.yml +++ /dev/null @@ -1,4 +0,0 @@ -name: model-register-and-deploy-spark -dependencies: -- pip: - - azureml-sdk diff --git a/how-to-use-azureml/deployment/spark/score.py b/how-to-use-azureml/deployment/spark/score.py deleted file mode 100644 index 48543326e..000000000 --- a/how-to-use-azureml/deployment/spark/score.py +++ /dev/null @@ -1,37 +0,0 @@ -import traceback -from pyspark.ml.linalg import VectorUDT -from azureml.core.model import Model -from pyspark.ml.classification import LogisticRegressionModel -from pyspark.sql.types import StructType, StructField -from pyspark.sql.types import DoubleType -from pyspark.sql import SQLContext -from pyspark import SparkContext - -sc = SparkContext.getOrCreate() -sqlContext = SQLContext(sc) -spark = sqlContext.sparkSession - -input_schema = StructType([StructField("features", VectorUDT()), StructField("label", DoubleType())]) -reader = spark.read -reader.schema(input_schema) - - -def init(): - global model - # note here "iris.model" is the name of the model registered under the workspace - # this call should return the path to the model.pkl file on the local disk. 
- model_path = Model.get_model_path('iris.model') - # Load the model file back into a LogisticRegression model - model = LogisticRegressionModel.load(model_path) - - -def run(data): - try: - input_df = reader.json(sc.parallelize([data])) - result = model.transform(input_df) - # you can return any datatype as long as it is JSON-serializable - return result.collect()[0]['prediction'] - except Exception as e: - traceback.print_exc() - error = str(e) - return error diff --git a/how-to-use-azureml/explain-model/azure-integration/gpu-explanation/gpu_tree_explainer.py b/how-to-use-azureml/explain-model/azure-integration/gpu-explanation/gpu_tree_explainer.py deleted file mode 100644 index 9819c6ad8..000000000 --- a/how-to-use-azureml/explain-model/azure-integration/gpu-explanation/gpu_tree_explainer.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. -# Licensed under the MIT license. - -from azureml.core.run import Run -from azureml.interpret import ExplanationClient -from interpret_community.adapter import ExplanationAdapter -import joblib -import os -import shap -import xgboost - -OUTPUT_DIR = './outputs/' -os.makedirs(OUTPUT_DIR, exist_ok=True) - -run = Run.get_context() -client = ExplanationClient.from_run(run) - -# get a dataset on income prediction -X, y = shap.datasets.adult() -features = X.columns.values - -# train an XGBoost model (but any other tree model type should work) -model = xgboost.XGBClassifier() -model.fit(X, y) - -explainer = shap.explainers.GPUTree(model, X) -X_shap = X[:100] -shap_values = explainer(X_shap) - -print("computed shap values:") -print(shap_values) - -# Use the explanation adapter to convert the importances into an interpret-community -# style explanation which can be uploaded to AzureML or visualized in the -# ExplanationDashboard widget -adapter = ExplanationAdapter(features, classification=True) -global_explanation = adapter.create_global(shap_values.values, X_shap, expected_values=shap_values.base_values) - -# write X_shap out as a pickle file for later visualization -x_shap_pkl = 'x_shap.pkl' -with open(x_shap_pkl, 'wb') as file: - joblib.dump(value=X_shap, filename=os.path.join(OUTPUT_DIR, x_shap_pkl)) -run.upload_file('x_shap_adult_census.pkl', os.path.join(OUTPUT_DIR, x_shap_pkl)) - -model_file_name = 'xgboost_.pkl' -# save model in the outputs folder so it automatically gets uploaded -with open(model_file_name, 'wb') as file: - joblib.dump(value=model, filename=os.path.join(OUTPUT_DIR, - model_file_name)) - -# register the model -run.upload_file('xgboost_model.pkl', os.path.join('./outputs/', model_file_name)) -original_model = run.register_model(model_name='xgboost_with_gpu_tree_explainer', - model_path='xgboost_model.pkl') - -# Uploading model explanation data for storage or visualization in webUX -# The explanation can then be downloaded on any compute -comment = 'Global explanation on classification model trained on adult census income dataset' -client.upload_model_explanation(global_explanation, comment=comment, model_id=original_model.id) diff --git a/how-to-use-azureml/explain-model/azure-integration/gpu-explanation/train-explain-model-gpu-tree-explainer.ipynb b/how-to-use-azureml/explain-model/azure-integration/gpu-explanation/train-explain-model-gpu-tree-explainer.ipynb deleted file mode 100644 index 7f05de9c5..000000000 --- a/how-to-use-azureml/explain-model/azure-integration/gpu-explanation/train-explain-model-gpu-tree-explainer.ipynb +++ /dev/null @@ -1,517 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", 
- "metadata": {}, - "source": [ - "Copyright (c) Microsoft Corporation. All rights reserved.\n", - "\n", - "Licensed under the MIT License." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Explain tree-based models on GPU using GPUTreeExplainer\n", - "\n", - "\n", - "_**This notebook illustrates how to use shap's GPUTreeExplainer on an Azure GPU machine.**_\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Problem: Train a tree-based model and explain the model on an Azure GPU machine using the GPUTreeExplainer.\n", - "\n", - "---\n", - "\n", - "## Table of Contents\n", - "\n", - "1. [Introduction](#Introduction)\n", - "1. [Setup](#Setup)\n", - "1. [Run model explainer locally at training time](#Explain)\n", - " 1. Apply feature transformations\n", - " 1. Train a binary classification model\n", - " 1. Explain the model on raw features\n", - " 1. Generate global explanations\n", - " 1. Generate local explanations\n", - "1. [Visualize explanations](#Visualize)\n", - "1. [Deploy model and scoring explainer](#Deploy)\n", - "1. [Next steps](#Next)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Introduction\n", - "This notebook demonstrates how to use the GPUTreeExplainer on some simple datasets. Like the TreeExplainer, the GPUTreeExplainer is specifically designed for tree-based machine learning models, but it is designed to accelerate the computations using NVIDIA GPUs.\n", - "\n", - "\n", - "Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.\n", - "\n", - "Notebook synopsis:\n", - "\n", - "1. Creating an Experiment in an existing Workspace\n", - "2. Configuration and remote run with a GPU machine" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Setup" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import logging\n", - "import os\n", - "import shutil\n", - "\n", - "import pandas as pd\n", - "\n", - "import azureml.core\n", - "from azureml.core.experiment import Experiment\n", - "from azureml.core.workspace import Workspace\n", - "from azureml.core.dataset import Dataset\n", - "from azureml.core.compute import AmlCompute\n", - "from azureml.core.compute import ComputeTarget\n", - "from azureml.core.run import Run\n", - "from azureml.core.model import Model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This sample notebook may use features that are not available in previous versions of the Azure ML SDK." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n", - "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As part of the setup you have already created a Workspace. To run the script, you also need to create an Experiment. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ws = Workspace.from_config()\n", - "\n", - "# Choose an experiment name.\n", - "experiment_name = 'gpu-tree-explainer'\n", - "\n", - "experiment = Experiment(ws, experiment_name)\n", - "\n", - "output = {}\n", - "output['Subscription ID'] = ws.subscription_id\n", - "output['Workspace Name'] = ws.name\n", - "output['Resource Group'] = ws.resource_group\n", - "output['Location'] = ws.location\n", - "output['Experiment Name'] = experiment.name\n", - "pd.set_option('display.max_colwidth', -1)\n", - "outputDf = pd.DataFrame(data = output, index = [''])\n", - "outputDf.T" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Create project directory\n", - "\n", - "Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script, and any additional files your training script depends on" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import shutil\n", - "\n", - "project_folder = './azureml-shap-gpu-tree-explainer'\n", - "os.makedirs(project_folder, exist_ok=True)\n", - "shutil.copy('gpu_tree_explainer.py', project_folder)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Set up a compute cluster\n", - "This section uses a user-provided compute cluster (named \"gpu-shap-cluster\" in this example). If a cluster with this name does not exist in the user's workspace, the below code will create a new cluster. You can choose the parameters of the cluster as mentioned in the comments." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.core.compute import ComputeTarget, AmlCompute\n", - "from azureml.core.compute_target import ComputeTargetException\n", - "\n", - "num_nodes = 1\n", - "\n", - "# Choose a name for your cluster.\n", - "amlcompute_cluster_name = \"gpu-shap-cluster\"\n", - "\n", - "# Verify that cluster does not exist already\n", - "try:\n", - " compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n", - " print('Found existing cluster, use it.')\n", - "except ComputeTargetException:\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_NC6\",\n", - " # To use GPUTreeExplainer, select a GPU such as \"STANDARD_NC6\" \n", - " # or similar GPU option\n", - " # available in your workspace\n", - " max_nodes = num_nodes)\n", - " compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n", - "\n", - "compute_target.wait_for_completion(show_output=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Configure & Run" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.core.runconfig import RunConfiguration\n", - "from azureml.core.conda_dependencies import CondaDependencies\n", - "\n", - "# Create a new RunConfig object\n", - "run_config = RunConfiguration(framework=\"python\")\n", - "\n", - "# Set compute target to AmlCompute target created in previous step\n", - "run_config.target = amlcompute_cluster_name\n", - "\n", - "from azureml.core import Environment\n", - "\n", - "environment_name = \"shapgpu\"\n", - "env = Environment(environment_name)\n", - "\n", - "env.docker.enabled = True\n", 
- "env.docker.base_image = None\n", - "\n", - "\n", - "# Note: this is to pin the pandas and xgboost versions to be same as notebook.\n", - "# In production scenario user would choose their dependencies\n", - "import pkg_resources\n", - "from distutils.version import LooseVersion\n", - "available_packages = pkg_resources.working_set\n", - "pandas_ver = None\n", - "numpy_ver = None\n", - "sklearn_ver = None\n", - "for dist in list(available_packages):\n", - " if dist.key == 'pandas':\n", - " pandas_ver = dist.version\n", - " if dist.key == 'numpy':\n", - " if LooseVersion(dist.version) >= LooseVersion('1.20.0'):\n", - " numpy_ver = dist.version\n", - " else:\n", - " numpy_ver = '1.21.6'\n", - " if dist.key == 'scikit-learn':\n", - " sklearn_ver = dist.version\n", - "pandas_dep = 'pandas'\n", - "numpy_dep = 'numpy'\n", - "sklearn_dep = 'scikit-learn'\n", - "if pandas_ver:\n", - " pandas_dep = 'pandas=={}'.format(pandas_ver)\n", - "if numpy_ver:\n", - " numpy_dep = 'numpy=={}'.format(numpy_ver)\n", - "if sklearn_ver:\n", - " sklearn_dep = 'scikit-learn=={}'.format(sklearn_ver)\n", - "\n", - "# Note: we build shap at commit 690245 for Tesla K80 GPUs\n", - "env.docker.base_dockerfile = f\"\"\"\n", - "FROM nvidia/cuda:10.2-devel-ubuntu18.04\n", - "ENV PATH=\"/root/miniconda3/bin:${{PATH}}\"\n", - "ARG PATH=\"/root/miniconda3/bin:${{PATH}}\"\n", - "RUN apt-get update && \\\n", - "apt-get install -y fuse && \\\n", - "apt-get install -y build-essential && \\\n", - "apt-get install -y python3-dev && \\\n", - "apt-get install -y wget && \\\n", - "apt-get install -y git && \\\n", - "rm -rf /var/lib/apt/lists/* && \\\n", - "wget \\\n", - "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \\\n", - "mkdir /root/.conda && \\\n", - "bash Miniconda3-latest-Linux-x86_64.sh -b && \\\n", - "rm -f Miniconda3-latest-Linux-x86_64.sh && \\\n", - "conda init bash && \\\n", - ". ~/.bashrc && \\\n", - "conda create -n shapgpu python=3.8 && \\\n", - "conda activate shapgpu && \\\n", - "apt-get install -y g++ && \\\n", - "printenv && \\\n", - "echo \"which nvcc: \" && \\\n", - "which nvcc && \\\n", - "pip install azureml-defaults && \\\n", - "pip install azureml-telemetry && \\\n", - "pip install azureml-interpret && \\\n", - "pip install {pandas_dep} && \\\n", - "cd /usr/local/src && \\\n", - "git clone https://github.com/slundberg/shap.git --single-branch && \\\n", - "cd shap && \\\n", - "git reset --hard 690245c6ab043edf40cfce3d8438a62e29ab599f && \\\n", - "mkdir build && \\\n", - "python setup.py install --user && \\\n", - "pip uninstall -y xgboost && \\\n", - "conda install py-xgboost==1.3.3 && \\\n", - "pip uninstall -y numpy && \\\n", - "pip install {numpy_dep} && \\\n", - "pip install {sklearn_dep} && \\\n", - "pip install chardet \\\n", - "\"\"\"\n", - "\n", - "env.python.user_managed_dependencies = True\n", - "env.python.interpreter_path = '/root/miniconda3/envs/shapgpu/bin/python'\n", - "\n", - "from azureml.core import Run\n", - "from azureml.core import ScriptRunConfig\n", - "\n", - "src = ScriptRunConfig(source_directory=project_folder, \n", - " script='gpu_tree_explainer.py', \n", - " compute_target=amlcompute_cluster_name,\n", - " environment=env) \n", - "run = experiment.submit(config=src)\n", - "run" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%time\n", - "# Shows output of the run on stdout.\n", - "run.wait_for_completion(show_output=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run.get_metrics()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Download \n", - "1. Download model explanation data." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.interpret import ExplanationClient\n", - "\n", - "# Get model explanation data\n", - "client = ExplanationClient.from_run(run)\n", - "global_explanation = client.download_model_explanation()\n", - "local_importance_values = global_explanation.local_importance_values\n", - "expected_values = global_explanation.expected_values" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Get the top k (e.g., 4) most important features with their importance values\n", - "global_explanation_topk = client.download_model_explanation(top_k=4)\n", - "global_importance_values = global_explanation_topk.get_ranked_global_values()\n", - "global_importance_names = global_explanation_topk.get_ranked_global_names()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print('global importance values: {}'.format(global_importance_values))\n", - "print('global importance names: {}'.format(global_importance_names))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "2. Download model file." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Retrieve model for visualization and deployment\n", - "from azureml.core.model import Model\n", - "import joblib\n", - "original_model = Model(ws, 'xgboost_with_gpu_tree_explainer')\n", - "model_path = original_model.download(exist_ok=True)\n", - "original_model = joblib.load(model_path)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "3. Download test dataset." 
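Before downloading, it can help to confirm which artifacts the remote run actually produced. A small sketch using the same `run` object; the names it prints should include the files uploaded by `gpu_tree_explainer.py`:

```python
# Enumerate every artifact the run uploaded, e.g. 'x_shap_adult_census.pkl'
# and 'xgboost_model.pkl', before downloading a specific file.
for name in run.get_file_names():
    print(name)
```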
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Retrieve x_test for visualization\n", - "x_test_path = './x_shap_adult_census.pkl'\n", - "run.download_file('x_shap_adult_census.pkl', output_file_path=x_test_path)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "x_test = joblib.load('x_shap_adult_census.pkl')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Visualize\n", - "Load the visualization dashboard" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from raiwidgets import ExplanationDashboard" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from interpret_community.common.model_wrapper import wrap_model\n", - "from interpret_community.dataset.dataset_wrapper import DatasetWrapper\n", - "# note we need to wrap the XGBoost model to output predictions and probabilities in the scikit-learn format\n", - "class WrappedXGBoostModel(object):\n", - " \"\"\"A class for wrapping an XGBoost model to output integer predicted classes.\"\"\"\n", - "\n", - " def __init__(self, model):\n", - " self.model = model\n", - "\n", - " def predict(self, dataset):\n", - " return self.model.predict(dataset).astype(int)\n", - "\n", - " def predict_proba(self, dataset):\n", - " return self.model.predict_proba(dataset)\n", - "\n", - "wrapped_model = WrappedXGBoostModel(wrap_model(original_model, DatasetWrapper(x_test), model_task='classification'))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ExplanationDashboard(global_explanation, wrapped_model, dataset=x_test)" - ] - } - ], - "metadata": { - "authors": [ - { - "name": "ilmat" - } - ], - "kernelspec": { - "display_name": "Python 3.8 - AzureML", - "language": "python", - "name": "python38-azureml" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} \ No newline at end of file diff --git a/how-to-use-azureml/explain-model/azure-integration/gpu-explanation/train-explain-model-gpu-tree-explainer.yml b/how-to-use-azureml/explain-model/azure-integration/gpu-explanation/train-explain-model-gpu-tree-explainer.yml deleted file mode 100644 index 3946b1a13..000000000 --- a/how-to-use-azureml/explain-model/azure-integration/gpu-explanation/train-explain-model-gpu-tree-explainer.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: train-explain-model-gpu-tree-explainer -dependencies: -- py-xgboost==1.3.3 -- pip: - - azureml-sdk - - azureml-interpret - - flask - - flask-cors - - gevent>=1.3.6 - - ipython - - matplotlib - - ipywidgets - - raiwidgets~=0.26.0 - - itsdangerous==2.0.1 - - markupsafe<2.1.0 - - scipy>=1.5.3 - - protobuf==3.20.0 - - jinja2==3.0.3 diff --git a/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.yml b/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.yml index b79e5a216..1d3a73540 100644 --- a/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.yml +++ 
b/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.yml @@ -8,9 +8,8 @@ dependencies: - gevent>=1.3.6 - ipython - matplotlib - - azureml-dataset-runtime - ipywidgets - - raiwidgets~=0.26.0 + - raiwidgets~=0.28.0 - itsdangerous==2.0.1 - markupsafe<2.1.0 - scipy>=1.5.3 diff --git a/how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.yml b/how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.yml index 801f4962a..ec8edd0fb 100644 --- a/how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.yml +++ b/how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.yml @@ -9,7 +9,7 @@ dependencies: - ipython - matplotlib - ipywidgets - - raiwidgets~=0.26.0 + - raiwidgets~=0.28.0 - packaging>=20.9 - itsdangerous==2.0.1 - markupsafe<2.1.0 diff --git a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.yml b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.yml index e63912b78..25dfc62c8 100644 --- a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.yml +++ b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.yml @@ -9,7 +9,7 @@ dependencies: - ipython - matplotlib - ipywidgets - - raiwidgets~=0.26.0 + - raiwidgets~=0.28.0 - packaging>=20.9 - itsdangerous==2.0.1 - markupsafe<2.1.0 diff --git a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.yml b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.yml index 6812a6dda..57d47be71 100644 --- a/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.yml +++ b/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.yml @@ -8,10 +8,9 @@ dependencies: - gevent>=1.3.6 - ipython - matplotlib - - azureml-dataset-runtime - azureml-core - ipywidgets - - raiwidgets~=0.26.0 + - raiwidgets~=0.28.0 - itsdangerous==2.0.1 - markupsafe<2.1.0 - scipy>=1.5.3 diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-parameter-tuning-with-hyperdrive.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-parameter-tuning-with-hyperdrive.ipynb index 2085c59a5..091c16a9a 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-parameter-tuning-with-hyperdrive.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-parameter-tuning-with-hyperdrive.ipynb @@ -233,7 +233,7 @@ " print('Found existing compute target {}.'.format(cluster_name))\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_NC6\",\n", + " compute_config = AmlCompute.provisioning_configuration(vm_size=\"Standard_NC6s_v3\",\n", " max_nodes=4)\n", "\n", " compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb 
b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb index 40abdc1f9..6f57131f9 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb @@ -133,7 +133,7 @@ " \n", "if not found:\n", " print('Creating a new compute target...')\n", - " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # for GPU, use \"STANDARD_NC6\"\n", + " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # for GPU, use \"Standard_NC6s_v3\"\n", " #vm_priority = 'lowpriority', # optional\n", " max_nodes = 4)\n", "\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb index 226ac0bc8..e3f8a40d2 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb @@ -136,7 +136,7 @@ " \n", "if not found:\n", " print('Creating a new compute target...')\n", - " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # for GPU, use \"STANDARD_NC6\"\n", + " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # for GPU, use \"Standard_NC6s_v3\"\n", " #vm_priority = 'lowpriority', # optional\n", " max_nodes = 4)\n", "\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb index f54e0e3e8..02cf53de3 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb @@ -148,7 +148,7 @@ " compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n", " print('Found existing cluster, use it.')\n", "except ComputeTargetException:\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',# for GPU, use \"STANDARD_NC6\"\n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',# for GPU, use \"Standard_NC6s_v3\"\n", " #vm_priority = 'lowpriority', # optional\n", " max_nodes=4)\n", " compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep.ipynb index 7c7871631..086c43b96 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep.ipynb @@ -86,7 +86,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If we could not 
find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `STANDARD_NC6` GPU VMs. This process is broken down into 3 steps:\n", + "If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `Standard_NC6s_v3` GPU VMs. This process is broken down into 3 steps:\n", "1. create the configuration (this step is local and only takes a second)\n", "2. create the cluster (this step will take about **20 seconds**)\n", "3. provision the VMs to bring the cluster to the initial size (of 1 in this case). This step will take about **3-5 minutes** and is providing only sparse output in the process. Please make sure to wait until the call returns before moving to the next cell" @@ -109,7 +109,7 @@ " print('Found existing compute target')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', max_nodes=4)\n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_NC6s_v3', max_nodes=4)\n", "\n", " # create the cluster\n", " gpu_cluster = ComputeTarget.create(ws, cluster_name, compute_config)\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-notebook-runner-step.ipynb b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-notebook-runner-step.ipynb index 4965b2305..68f7c4c96 100644 --- a/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-notebook-runner-step.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-notebook-runner-step.ipynb @@ -176,7 +176,7 @@ " \n", "if not found:\n", " print('Creating a new compute target...')\n", - " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # for GPU, use \"STANDARD_NC6\"\n", + " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # for GPU, use \"Standard_NC6s_v3\"\n", " #vm_priority = 'lowpriority', # optional\n", " max_nodes = 4)\n", "\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb index d11b6ac70..267d97268 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-image-inference-mnist.ipynb @@ -105,7 +105,7 @@ "compute_min_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MIN_NODES\", 0)\n", "compute_max_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MAX_NODES\", 4)\n", "\n", - "# This example uses CPU VM. For using GPU VM, set SKU to STANDARD_NC6\n", + "# This example uses CPU VM. 
For using GPU VM, set SKU to Standard_NC6s_v3\n", "vm_size = os.environ.get(\"AML_COMPUTE_CLUSTER_SKU\", \"STANDARD_D2_V2\")\n", "\n", "\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-partition-per-folder.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-partition-per-folder.ipynb index aa59149ac..98922dc82 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-partition-per-folder.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/file-dataset-partition-per-folder.ipynb @@ -143,7 +143,7 @@ "compute_min_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MIN_NODES\", 0)\n", "compute_max_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MAX_NODES\", 2)\n", "\n", - "# This example uses CPU VM. For using GPU VM, set SKU to STANDARD_NC6\n", + "# This example uses CPU VM. For using GPU VM, set SKU to Standard_NC6s_v3\n", "vm_size = os.environ.get(\"AML_COMPUTE_CLUSTER_SKU\", \"STANDARD_D2_V2\")\n", "\n", "\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb index 38168e1e7..103fd61a8 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-inference-iris.ipynb @@ -103,7 +103,7 @@ "compute_min_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MIN_NODES\", 0)\n", "compute_max_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MAX_NODES\", 4)\n", "\n", - "# This example uses CPU VM. For using GPU VM, set SKU to STANDARD_NC6\n", + "# This example uses CPU VM. For using GPU VM, set SKU to Standard_NC6s_v3\n", "vm_size = os.environ.get(\"AML_COMPUTE_CLUSTER_SKU\", \"STANDARD_D2_V2\")\n", "\n", "\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-partition-per-column.ipynb b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-partition-per-column.ipynb index 726ea9efc..3cd29fed9 100644 --- a/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-partition-per-column.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/parallel-run/tabular-dataset-partition-per-column.ipynb @@ -86,7 +86,8 @@ "import requests\n", "\n", "oj_sales_path = \"./oj.csv\"\n", - "r = requests.get(\"https://raw.githubusercontent.com/Azure/azureml-examples/main/sdk/python/jobs/automl-standalone-jobs/automl-forecasting-orange-juice-sales/data/dominicks_OJ.csv\")\n", + "r = requests.get(\"https://raw.githubusercontent.com/Azure/MachineLearningNotebooks/master/how-to-use-azureml/\"\n", + "                 \"automated-machine-learning/automl-forecasting-orange-juice-sales/data/dominicks_OJ.csv\")\n", "open(oj_sales_path, \"wb\").write(r.content)" ] }, @@ -165,7 +166,7 @@ "compute_min_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MIN_NODES\", 0)\n", "compute_max_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MAX_NODES\", 2)\n", "\n", - "# This example uses CPU VM. 
For using GPU VM, set SKU to Standard_NC6s_v3\n", "vm_size = os.environ.get(\"AML_COMPUTE_CLUSTER_SKU\", \"STANDARD_D2_V2\")\n", "\n", "\n", diff --git a/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer-parallel-run.ipynb b/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer-parallel-run.ipynb index e1aec369c..6207b931e 100644 --- a/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer-parallel-run.ipynb +++ b/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer-parallel-run.ipynb @@ -210,7 +210,7 @@ " print(\"found existing cluster.\")\n", "except ComputeTargetException:\n", " print(\"creating new cluster\")\n", - " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_NC6\",\n", + " provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"Standard_NC6s_v3\",\n", " max_nodes = 3)\n", "\n", " # create the cluster\n", diff --git a/how-to-use-azureml/ml-frameworks/fastai/fastai-with-custom-docker/fastai-with-custom-docker.ipynb b/how-to-use-azureml/ml-frameworks/fastai/fastai-with-custom-docker/fastai-with-custom-docker.ipynb index fdcaedc5d..9ba4713a2 100644 --- a/how-to-use-azureml/ml-frameworks/fastai/fastai-with-custom-docker/fastai-with-custom-docker.ipynb +++ b/how-to-use-azureml/ml-frameworks/fastai/fastai-with-custom-docker/fastai-with-custom-docker.ipynb @@ -246,7 +246,7 @@ " print('Found existing compute target.')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_NC6s_v3',\n", " max_nodes=4)\n", "\n", " # create the cluster\n", diff --git a/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb b/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb index 0a406bc5b..08dc7e0f7 100644 --- a/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb +++ b/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -10,6 +11,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -17,6 +19,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nbpresent": { @@ -40,6 +43,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -80,6 +84,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -101,6 +106,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nbpresent": { @@ -131,6 +137,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -168,6 +175,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -206,6 +214,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -213,6 +222,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nbpresent": { @@ -240,6 +250,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", 
"metadata": {}, "source": [ @@ -269,6 +280,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -279,10 +291,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `STANDARD_NC6` GPU VMs. This process is broken down into 3 steps:\n", + "If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `Standard_NC6s_v3` GPU VMs. This process is broken down into 3 steps:\n", "1. create the configuration (this step is local and only takes a second)\n", "2. create the cluster (this step will take about **20 seconds**)\n", "3. provision the VMs to bring the cluster to the initial size (of 1 in this case). This step will take about **3-5 minutes** and is providing only sparse output in the process. Please make sure to wait until the call returns before moving to the next cell" @@ -305,7 +318,7 @@ " print('Found existing compute target')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', \n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_NC6s_v3', \n", " max_nodes=4)\n", "\n", " # create the cluster\n", @@ -320,6 +333,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -338,6 +352,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -361,6 +376,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nbpresent": { @@ -375,6 +391,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -394,6 +411,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -411,6 +429,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -430,12 +449,12 @@ "channels:\n", "- conda-forge\n", "dependencies:\n", - "- python=3.7\n", - "- pip=21.3.1\n", + "- python=3.8\n", + "- pip=23.1.2\n", "- pip:\n", " - h5py<=2.10.0\n", " - azureml-defaults\n", - " - tensorflow-gpu==2.0.0\n", + " - tensorflow-gpu==2.2.0\n", " - keras<=2.3.1\n", " - matplotlib\n", " - protobuf==3.20.1" @@ -457,6 +476,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -501,6 +521,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -518,6 +539,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -547,6 +569,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -572,6 +595,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -579,6 +603,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -619,6 +644,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -626,6 +652,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -649,6 +676,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -657,6 +685,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -668,6 +697,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -675,6 +705,7 @@ ] }, { + "attachments": {}, 
"cell_type": "markdown", "metadata": {}, "source": [ @@ -691,6 +722,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -698,6 +730,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -712,6 +745,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -719,6 +753,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -726,6 +761,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -753,6 +789,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -775,6 +812,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -791,6 +829,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -813,6 +852,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -829,6 +869,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -863,6 +904,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -890,6 +932,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -906,6 +949,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -922,6 +966,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -970,6 +1015,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -997,6 +1043,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1035,6 +1082,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1051,6 +1099,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1067,6 +1116,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1115,6 +1165,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1133,6 +1184,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1162,6 +1214,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1184,6 +1237,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ diff --git a/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.yml b/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.yml index 8fa4d3521..f65b1b29c 100644 --- a/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.yml +++ b/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.yml @@ -3,6 +3,4 @@ dependencies: - pip: - azureml-sdk - azureml-widgets - - tensorflow - - keras<=2.3.1 - matplotlib diff --git a/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-distributeddataparallel/distributed-pytorch-with-distributeddataparallel.ipynb b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-distributeddataparallel/distributed-pytorch-with-distributeddataparallel.ipynb index 3d93e31f2..4c790438e 100644 --- 
a/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-distributeddataparallel/distributed-pytorch-with-distributeddataparallel.ipynb +++ b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-distributeddataparallel/distributed-pytorch-with-distributeddataparallel.ipynb @@ -97,7 +97,7 @@ "metadata": {}, "source": [ "## Create or attach existing AmlCompute\n", - "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the below code creates an `STANDARD_NC6` GPU cluster that autoscales from `0` to `4` nodes.\n", + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the below code creates a `Standard_NC6s_v3` GPU cluster that autoscales from `0` to `4` nodes.\n", "\n", "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", "\n", @@ -123,7 +123,7 @@ " print('Found existing compute target.')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_NC6s_v3',\n", " max_nodes=4)\n", "\n", " # create the cluster\n", @@ -293,7 +293,7 @@ "source": [ "from azureml.core import Environment\n", "\n", - "pytorch_env = Environment.get(ws, name='AzureML-PyTorch-1.6-GPU')" + "pytorch_env = Environment.get(ws, name='azureml-acpt-pytorch-1.11-cuda11.3')" ] }, { @@ -323,7 +323,7 @@ "To use the per-process launch option in which Azure ML will handle launching each of the processes to run your training script,\n", "\n", "1. Specify the training script and arguments\n", - "2. Create a `PyTorchConfiguration` and specify `node_count` and `process_count`. The `process_count` is the total number of processes you want to run for the job; this should typically equal the # of GPUs available on each node multiplied by the # of nodes. Since this tutorial uses the `STANDARD_NC6` SKU, which has one GPU, the total process count for a 2-node job is `2`. If you are using a SKU with >1 GPUs, adjust the `process_count` accordingly.\n", + "2. Create a `PyTorchConfiguration` and specify `node_count` and `process_count`. The `process_count` is the total number of processes you want to run for the job; this should typically equal the # of GPUs available on each node multiplied by the # of nodes. Since this tutorial uses the `Standard_NC6s_v3` SKU, which has one GPU, the total process count for a 2-node job is `2`. 
If you are using a SKU with >1 GPUs, adjust the `process_count` accordingly.\n", "\n", "Azure ML will set the `MASTER_ADDR`, `MASTER_PORT`, `NODE_RANK`, `WORLD_SIZE` environment variables on each node, in addition to the process-level `RANK` and `LOCAL_RANK` environment variables, that are needed for distributed PyTorch training." ] diff --git a/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb index 91acafb9d..7d0190396 100644 --- a/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb +++ b/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb @@ -97,7 +97,7 @@ "metadata": {}, "source": [ "## Create or attach existing AmlCompute\n", - "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the below code creates an `STANDARD_NC6` GPU cluster that autoscales from `0` to `4` nodes.\n", + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the below code creates a `Standard_NC6s_v3` GPU cluster that autoscales from `0` to `4` nodes.\n", "\n", "> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. 
Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n", "\n", @@ -123,7 +123,7 @@ " print('Found existing compute target.')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_NC6s_v3',\n", " max_nodes=4)\n", "\n", " # create the cluster\n", diff --git a/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb b/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb index 96f430dc9..ee54b088f 100644 --- a/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb +++ b/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -10,6 +11,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -17,6 +19,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -28,6 +31,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -48,6 +52,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -71,6 +76,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -94,6 +100,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -124,7 +131,7 @@ " print('Found existing compute target.')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', \n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_NC6s_v3', \n", " max_nodes=4)\n", "\n", " # create the cluster\n", @@ -137,6 +144,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -144,6 +152,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -152,6 +161,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -172,6 +182,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -180,6 +191,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -204,6 +216,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -222,6 +235,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -242,32 +256,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "### Create an environment\n", "\n", - "Define a conda environment YAML file with your training script dependencies and create an Azure ML environment." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile conda_dependencies.yml\n", - "\n", - "channels:\n", - "- conda-forge\n", - "- pytorch\n", - "dependencies:\n", - "- python=3.8.12\n", - "- pip=21.3.1\n", - "- pytorch::pytorch==1.8.1\n", - "- pytorch::torchvision==0.9.1\n", - "- pip:\n", - " - azureml-defaults" + "Create an Azure ML environment." ] }, { @@ -278,14 +273,11 @@ "source": [ "from azureml.core import Environment\n", "\n", - "pytorch_env = Environment.from_conda_specification(name = 'pytorch-1.6-gpu', file_path = './conda_dependencies.yml')\n", - "\n", - "# Specify a GPU base image\n", - "pytorch_env.docker.enabled = True\n", - "pytorch_env.docker.base_image = 'mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu20.04'" + "pytorch_env = Environment.get(ws, name='azureml-acpt-pytorch-1.11-cuda11.3')" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -310,6 +302,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -338,6 +331,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -357,6 +351,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -373,6 +368,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -381,6 +377,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -417,6 +414,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -434,6 +432,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -451,6 +450,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -476,6 +476,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -517,6 +518,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -534,6 +536,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -542,6 +545,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -555,6 +559,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -564,6 +569,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -598,6 +604,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -605,6 +612,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -621,6 +629,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -637,6 +646,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -695,6 +705,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ diff --git a/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb b/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb index 02788b11c..37c931057 100644 --- a/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb +++ b/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb @@ -1,6 
+1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -10,6 +11,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -17,6 +19,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -25,6 +28,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -32,6 +36,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -51,6 +56,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -58,6 +64,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -76,6 +83,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -83,6 +91,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -105,6 +114,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -112,6 +122,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -123,6 +134,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -164,6 +176,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -171,6 +184,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -178,6 +192,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -185,6 +200,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -192,6 +208,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -211,6 +228,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -218,6 +236,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -258,6 +277,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -265,6 +285,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -284,27 +305,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "### Create an environment\n", "\n", - "Define a conda environment YAML file with your training script dependencies and create an Azure ML environment." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile conda_dependencies.yml\n", - "\n", - "dependencies:\n", - "- python=3.6.2\n", - "- scikit-learn\n", - "- pip:\n", - " - azureml-defaults" + "Create an Azure ML environment." 
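The environment hunks in this patch all have the same shape: the inlined conda_dependencies.yml written via %%writefile, the Environment.from_conda_specification call, and the hand-picked Docker base image are dropped in favor of a curated environment fetched by name. A before/after sketch, assuming a Workspace `ws` (the curated names are the ones the patch switches to):

    from azureml.core import Environment

    # Old pattern (removed above): build from a local conda spec and pin a base image
    # pytorch_env = Environment.from_conda_specification(name='pytorch-gpu',
    #                                                    file_path='./conda_dependencies.yml')
    # pytorch_env.docker.base_image = 'mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu20.04'

    # New pattern: fetch a curated environment that already bundles the
    # framework and CUDA stack
    pytorch_env = Environment.get(ws, name='azureml-acpt-pytorch-1.11-cuda11.3')
    sklearn_env = Environment.get(ws, name='azureml-sklearn-1.0')
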
] }, { @@ -315,10 +322,11 @@ "source": [ "from azureml.core import Environment\n", "\n", - "sklearn_env = Environment.from_conda_specification(name = 'sklearn-env', file_path = './conda_dependencies.yml')" + "sklearn_env = Environment.get(ws, name='azureml-sklearn-1.0')" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -343,6 +351,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -350,6 +359,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -366,6 +376,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -373,6 +384,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -400,6 +412,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -407,6 +420,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -414,6 +428,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -421,6 +436,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -454,6 +470,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -471,6 +488,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -478,6 +496,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -512,6 +531,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -539,6 +559,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -555,6 +576,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb index 1ee4771cb..7d9cdd79b 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -10,6 +11,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -17,6 +19,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -25,6 +28,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -49,6 +53,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -72,6 +77,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -95,6 +101,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -125,7 +132,7 @@ " print('Found existing compute target')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', \n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_NC6s_v3', \n", " max_nodes=4)\n", "\n", " # create the cluster\n", @@ -138,6 +145,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -145,6 +153,7 @@ ] }, { + "attachments": {}, 
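Once a compute target and an environment exist, the training notebooks touched by this patch wire them together through a ScriptRunConfig. A sketch under the same assumptions (`ws` and `compute_target` from the snippets above, plus a hypothetical train.py):

    from azureml.core import Experiment, ScriptRunConfig

    src = ScriptRunConfig(source_directory='.',
                          script='train.py',  # hypothetical training script
                          compute_target=compute_target,
                          environment=sklearn_env)

    run = Experiment(workspace=ws, name='sample-experiment').submit(src)
    run.wait_for_completion(show_output=True)
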
"cell_type": "markdown", "metadata": {}, "source": [ @@ -152,6 +161,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -159,6 +169,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -179,6 +190,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -195,10 +207,11 @@ "source": [ "from azureml.core import Environment\n", "\n", - "tf_env = Environment.get(ws, name='AzureML-tensorflow-2.7-ubuntu20.04-py38-cuda11-gpu')" + "tf_env = Environment.get(ws, name='azureml-tensorflow-2.11-cuda11')" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -226,6 +239,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -245,6 +259,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -263,6 +278,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb index 013544806..0aa915838 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -10,6 +11,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -17,6 +19,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -25,6 +28,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -65,6 +69,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -88,6 +93,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -109,6 +115,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nbpresent": { @@ -139,6 +146,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nbpresent": { @@ -171,6 +179,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nbpresent": { @@ -215,6 +224,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -238,6 +248,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -266,6 +277,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -276,10 +288,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `STANDARD_NC6` GPU VMs. This process is broken down into 3 steps:\n", + "If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `Standard_NC6s_v3` GPU VMs. This process is broken down into 3 steps:\n", "1. create the configuration (this step is local and only takes a second)\n", "2. create the cluster (this step will take about **20 seconds**)\n", "3. 
provision the VMs to bring the cluster to the initial size (of 1 in this case). This step will take about **3-5 minutes** and is providing only sparse output in the process. Please make sure to wait until the call returns before moving to the next cell" @@ -302,7 +315,7 @@ " print('Found existing compute target')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_NC6s_v3',\n", " max_nodes=4)\n", "\n", " # create the cluster\n", @@ -317,6 +330,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -335,6 +349,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -358,6 +373,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nbpresent": { @@ -372,6 +388,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -391,6 +408,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -408,6 +426,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -424,10 +443,11 @@ "source": [ "from azureml.core import Environment\n", "\n", - "tf_env = Environment.get(ws, name='AzureML-tensorflow-2.6-ubuntu20.04-py38-cuda11-gpu')" + "tf_env = Environment.get(ws, name='azureml-tensorflow-2.11-cuda11')" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -457,6 +477,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -484,6 +505,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -494,6 +516,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -519,6 +542,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -541,6 +565,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -558,6 +583,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -581,6 +607,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -597,6 +624,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -631,6 +659,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -648,6 +677,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -664,6 +694,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -680,6 +711,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -710,6 +742,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -735,6 +768,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -753,6 +787,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -783,6 +818,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -808,6 +844,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -833,6 +870,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb 
b/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb index aaff556fd..e536ccbe8 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -10,6 +11,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -17,6 +19,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nbpresent": { @@ -39,6 +42,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -79,6 +83,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -102,6 +107,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -123,6 +129,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nbpresent": { @@ -153,6 +160,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nbpresent": { @@ -186,6 +194,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nbpresent": { @@ -229,6 +238,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -252,6 +262,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -283,6 +294,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -293,10 +305,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `STANDARD_NC6` GPU VMs. This process is broken down into 3 steps:\n", + "If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `Standard_NC6s_v3` GPU VMs. This process is broken down into 3 steps:\n", "1. create the configuration (this step is local and only takes a second)\n", "2. create the cluster (this step will take about **20 seconds**)\n", "3. provision the VMs to bring the cluster to the initial size (of 1 in this case). This step will take about **3-5 minutes** and is providing only sparse output in the process. 
Please make sure to wait until the call returns before moving to the next cell" @@ -319,7 +332,7 @@ " print('Found existing compute target')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', \n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_NC6s_v3', \n", " max_nodes=4)\n", "\n", " # create the cluster\n", @@ -334,6 +347,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -352,6 +366,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -375,6 +390,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nbpresent": { @@ -389,6 +405,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -408,6 +425,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -425,6 +443,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -441,10 +460,11 @@ "source": [ "from azureml.core import Environment\n", "\n", - "tf_env = Environment.get(ws, name='AzureML-tensorflow-2.6-ubuntu20.04-py38-cuda11-gpu')" + "tf_env = Environment.get(ws, name='azureml-tensorflow-2.11-cuda11')" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -475,6 +495,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -492,6 +513,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -522,6 +544,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -547,6 +570,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -587,6 +611,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -616,6 +641,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -623,6 +649,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -639,6 +666,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -649,6 +677,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -657,6 +686,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -669,6 +699,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -676,6 +707,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -683,6 +715,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -710,6 +743,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -732,6 +766,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -748,6 +783,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -770,6 +806,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -786,6 +823,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -820,6 +858,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -846,6 +885,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -862,6 +902,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -878,6 +919,7 @@ ] }, { + 
"attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -924,6 +966,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -950,6 +993,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -987,6 +1031,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1003,6 +1048,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1019,6 +1065,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1067,6 +1114,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1096,6 +1144,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1118,6 +1167,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.yml b/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.yml index 76b7eabcf..43575efe0 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.yml +++ b/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.yml @@ -6,7 +6,5 @@ dependencies: - azureml-sdk - azureml-widgets - pandas - - keras - - tensorflow==2.0.0 - matplotlib - fuse diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/tf_mnist_with_checkpoint.py b/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/tf_mnist_with_checkpoint.py index 598c4b08b..606987680 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/tf_mnist_with_checkpoint.py +++ b/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/tf_mnist_with_checkpoint.py @@ -5,7 +5,7 @@ import argparse import os import re -import tensorflow as tf +import tensorflow.compat.v1 as tf import glob from azureml.core import Run @@ -41,8 +41,8 @@ recursive=True)[0], True).reshape(-1) print(X_train.shape, y_train.shape, X_test.shape, y_test.shape, sep='\n') - training_set_size = X_train.shape[0] +tf.disable_v2_behavior() n_inputs = 28 * 28 n_h1 = 100 diff --git a/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb b/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb index bb14af030..05997873f 100644 --- a/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb +++ b/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -10,6 +11,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -17,6 +19,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -25,6 +28,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -49,6 +53,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -72,6 
+77,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -95,6 +101,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -125,7 +132,7 @@ " print('Found existing compute target.')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', \n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_NC6s_v3', \n", " max_nodes=4)\n", "\n", " # create the cluster\n", @@ -138,6 +145,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -145,6 +153,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -169,6 +178,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -208,6 +218,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -215,6 +226,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -234,6 +246,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -256,6 +269,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -276,6 +290,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -292,10 +307,11 @@ "source": [ "from azureml.core import Environment\n", "\n", - "tf_env = Environment.get(ws, name='AzureML-TensorFlow-1.13-GPU')" + "tf_env = Environment.get(ws, name='azureml-tensorflow-2.11-cuda11')" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -322,6 +338,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -340,6 +357,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -358,6 +376,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -374,6 +393,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -381,6 +401,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -401,6 +422,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -428,6 +450,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ diff --git a/how-to-use-azureml/ml-frameworks/using-mlflow/train-and-deploy-pytorch/train-and-deploy-pytorch.ipynb b/how-to-use-azureml/ml-frameworks/using-mlflow/train-and-deploy-pytorch/train-and-deploy-pytorch.ipynb index 7dbe4b1cc..e829525f7 100644 --- a/how-to-use-azureml/ml-frameworks/using-mlflow/train-and-deploy-pytorch/train-and-deploy-pytorch.ipynb +++ b/how-to-use-azureml/ml-frameworks/using-mlflow/train-and-deploy-pytorch/train-and-deploy-pytorch.ipynb @@ -164,7 +164,7 @@ "source": [ "from azureml.core import Environment\n", "\n", - "env = Environment.get(workspace=ws, name=\"AzureML-PyTorch-1.4-GPU\").clone(\"mlflow-env\")\n", + "env = Environment.get(workspace=ws, name=\"azureml-acpt-pytorch-1.11-cuda11.3\").clone(\"mlflow-env\")\n", "\n", "env.python.conda_dependencies.add_pip_package(\"azureml-mlflow\")\n", "env.python.conda_dependencies.add_pip_package(\"Pillow==6.0.0\")" diff --git a/how-to-use-azureml/reinforcement-learning/README.md b/how-to-use-azureml/reinforcement-learning/README.md index cb775c306..49803b0cd 100644 --- a/how-to-use-azureml/reinforcement-learning/README.md +++ 
b/how-to-use-azureml/reinforcement-learning/README.md @@ -44,7 +44,7 @@ To make use of these samples, you need the following. * A Microsoft Azure subscription. * A Microsoft Azure resource group. * An Azure Machine Learning Workspace in the resource group. -* Azure Machine Learning training compute. These samples use the VM sizes `STANDARD_NC6` and `STANDARD_D2_V2`. If these are not available in your region, +* Azure Machine Learning training compute. These samples use the VM sizes `Standard_NC6s_v3` and `STANDARD_D2_V2`. If these are not available in your region, you can replace them with other sizes. * A virtual network set up in the resource group for samples that use multiple compute targets. The Cartpole and Multi-agent Particle examples do not need a virtual network. Any network security group defined on the virtual network must allow network traffic on ports used by Azure infrastructure services. Sample instructions are provided in Atari Pong and Minecraft example notebooks. diff --git a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/files/networkutils.py b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/files/networkutils.py deleted file mode 100644 index 64af7d8ba..000000000 --- a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/files/networkutils.py +++ /dev/null @@ -1,237 +0,0 @@ -import sys -import csv -from azure.mgmt.network import NetworkManagementClient - - -def check_port_in_port_range(expected_port: str, - dest_port_range: str): - """ - Check if a port is within a port range - Port range maybe like *, 8080 or 8888-8889 - """ - - if dest_port_range == '*': - return True - - dest_ports = dest_port_range.split('-') - - if len(dest_ports) == 1 and \ - int(dest_ports[0]) == int(expected_port): - return True - - if len(dest_ports) == 2 and \ - int(dest_ports[0]) <= int(expected_port) and \ - int(dest_ports[1]) >= int(expected_port): - return True - - return False - - -def check_port_in_destination_port_ranges(expected_port: str, - dest_port_ranges: list): - """ - Check if a port is within a given list of port ranges - i.e. check if port 8080 is in port ranges of 22,80,8080-8090,443 - """ - - for dest_port_range in dest_port_ranges: - if check_port_in_port_range(expected_port, dest_port_range) is True: - return True - - return False - - -def check_ports_in_destination_port_ranges(expected_ports: list, - dest_port_ranges: list): - """ - Check if all ports in a given port list are within a given list - of port ranges - i.e. 
check if port 8080,8081 are in port ranges of 22,80,8080-8090,443 - """ - - for expected_port in expected_ports: - if check_port_in_destination_port_ranges( - expected_port, dest_port_ranges) is False: - return False - - return True - - -def check_source_address_prefix(source_address_prefix: str): - """Check if source address prefix is BatchNodeManagement or default""" - - required_prefix = 'BatchNodeManagement' - default_prefix = 'default' - - if source_address_prefix.lower() == required_prefix.lower() or \ - source_address_prefix.lower() == default_prefix.lower(): - return True - - return False - - -def check_protocol(protocol: str): - """Check if protocol is supported - Tcp/Any""" - - required_protocol = 'Tcp' - any_protocol = 'Any' - - if required_protocol.lower() == protocol.lower() or \ - any_protocol.lower() == protocol.lower(): - return True - - return False - - -def check_direction(direction: str): - """Check if port direction is inbound""" - - required_direction = 'Inbound' - - if required_direction.lower() == direction.lower(): - return True - - return False - - -def check_provisioning_state(provisioning_state: str): - """Check if the provisioning state is succeeded""" - - required_provisioning_state = 'Succeeded' - - if required_provisioning_state.lower() == provisioning_state.lower(): - return True - - return False - - -def check_rule_for_Azure_ML(rule): - """Check if the ports required for Azure Machine Learning are open""" - - required_ports = ['29876', '29877'] - - if check_source_address_prefix(rule.source_address_prefix) is False: - return False - - if check_protocol(rule.protocol) is False: - return False - - if check_direction(rule.direction) is False: - return False - - if check_provisioning_state(rule.provisioning_state) is False: - return False - - if rule.destination_port_range is not None: - if check_ports_in_destination_port_ranges( - required_ports, - [rule.destination_port_range]) is False: - return False - else: - if check_ports_in_destination_port_ranges( - required_ports, - rule.destination_port_ranges) is False: - return False - - return True - - -def check_vnet_security_rules(auth_object, - vnet_subscription_id, - vnet_resource_group, - vnet_name, - save_to_file=False): - """ - Check all the rules of virtual network if required ports for Azure Machine - Learning are open - """ - - network_client = NetworkManagementClient( - auth_object, - vnet_subscription_id) - - # get the vnet - vnet = network_client.virtual_networks.get( - resource_group_name=vnet_resource_group, - virtual_network_name=vnet_name) - - vnet_location = vnet.location - vnet_info = [] - - if vnet.subnets is None or len(vnet.subnets) == 0: - print('WARNING: No subnet found for VNet:', vnet_name) - - # for each subnet of the vnet - for subnet in vnet.subnets: - if subnet.network_security_group is None: - print('WARNING: No network security group found for subnet.', - 'Subnet', - subnet.id.split("/")[-1]) - else: - # get all the rules - network_security_group_name = \ - subnet.network_security_group.id.split("/")[-1] - network_security_group_resource_group_name = \ - subnet.network_security_group.id.split("/")[4] - network_security_group_subscription_id = \ - subnet.network_security_group.id.split("/")[2] - - security_rules = list(network_client.security_rules.list( - network_security_group_resource_group_name, - network_security_group_name)) - - rule_matched = None - for rule in security_rules: - rule_info = [] - # add vnet details - rule_info.append(vnet_name) - 
rule_info.append(vnet_subscription_id) - rule_info.append(vnet_resource_group) - rule_info.append(vnet_location) - # add subnet details - rule_info.append(subnet.id.split("/")[-1]) - rule_info.append(network_security_group_name) - rule_info.append(network_security_group_subscription_id) - rule_info.append(network_security_group_resource_group_name) - # add rule details - rule_info.append(rule.priority) - rule_info.append(rule.name) - rule_info.append(rule.source_address_prefix) - if rule.destination_port_range is not None: - rule_info.append(rule.destination_port_range) - else: - rule_info.append(rule.destination_port_ranges) - rule_info.append(rule.direction) - rule_info.append(rule.provisioning_state) - vnet_info.append(rule_info) - - if check_rule_for_Azure_ML(rule) is True: - rule_matched = rule - - if rule_matched is not None: - print("INFORMATION: Rule matched with required ports. Subnet:", - subnet.id.split("/")[-1], "Rule:", rule.name) - else: - print("WARNING: No rule matched with required ports. Subnet:", - subnet.id.split("/")[-1]) - - if save_to_file is True: - file_name = vnet_name + ".csv" - with open(file_name, mode='w') as vnet_rule_file: - vnet_rule_file_writer = csv.writer( - vnet_rule_file, - delimiter=',', - quotechar='"', - quoting=csv.QUOTE_MINIMAL) - header = ['VNet_Name', 'VNet_Subscription_ID', - 'VNet_Resource_Group', 'VNet_Location', - 'Subnet_Name', 'NSG_Name', - 'NSG_Subscription_ID', 'NSG_Resource_Group', - 'Rule_Priority', 'Rule_Name', 'Rule_Source', - 'Rule_Destination_Ports', 'Rule_Direction', - 'Rule_Provisioning_State'] - vnet_rule_file_writer.writerow(header) - vnet_rule_file_writer.writerows(vnet_info) - - print("INFORMATION: Network security group rules for your virtual \ -network are saved in file", file_name) diff --git a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb index e7c748b7f..84c9744c5 100644 --- a/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb +++ b/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb @@ -166,7 +166,7 @@ "compute_max_nodes = 2\n", "\n", "# This example uses GPU VM.\n", - "vm_size = 'STANDARD_NC6'\n", + "vm_size = 'Standard_NC6s_v3'\n", "\n", "if compute_name in ws.compute_targets:\n", " compute_target = ws.compute_targets[compute_name]\n", diff --git a/how-to-use-azureml/responsible-ai/visualize-upload-loan-decision/rai-loan-decision.yml b/how-to-use-azureml/responsible-ai/visualize-upload-loan-decision/rai-loan-decision.yml index 8f7831378..6e4342995 100644 --- a/how-to-use-azureml/responsible-ai/visualize-upload-loan-decision/rai-loan-decision.yml +++ b/how-to-use-azureml/responsible-ai/visualize-upload-loan-decision/rai-loan-decision.yml @@ -4,11 +4,11 @@ dependencies: - azureml-sdk - azureml-interpret - azureml-contrib-fairness - - fairlearn>=0.6.2 + - fairlearn>=0.6.2,<=0.7.0 - matplotlib - azureml-dataset-runtime - ipywidgets - - raiwidgets~=0.26.0 + - raiwidgets~=0.28.0 - liac-arff - packaging>=20.9 - itsdangerous==2.0.1 diff --git a/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb b/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb index 98ec90e5c..ca30cb169 100644 --- a/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb +++ b/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb @@ -101,7 +101,7 @@ 
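The tightened pins in rai-loan-decision.yml above cap fairlearn at 0.7.0 and move raiwidgets to the 0.28 compatible-release range. A small sketch of what those specifiers accept, using the packaging library (already listed in that file):

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    fairlearn_spec = SpecifierSet('>=0.6.2,<=0.7.0')
    raiwidgets_spec = SpecifierSet('~=0.28.0')   # compatible release: >=0.28.0, ==0.28.*

    print(Version('0.7.0') in fairlearn_spec)    # True  (upper bound is inclusive)
    print(Version('0.8.0') in fairlearn_spec)    # False (excluded by the new cap)
    print(Version('0.28.5') in raiwidgets_spec)  # True
    print(Version('0.29.0') in raiwidgets_spec)  # False
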
"\n", "# Check core SDK version number\n", "\n", - "print(\"This notebook was created using SDK version 1.51.0, you are currently running version\", azureml.core.VERSION)" + "print(\"This notebook was created using SDK version 1.53.0, you are currently running version\", azureml.core.VERSION)" ] }, { diff --git a/how-to-use-azureml/track-and-monitor-experiments/tensorboard/export-run-history-to-tensorboard/export-run-history-to-tensorboard.yml b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/export-run-history-to-tensorboard/export-run-history-to-tensorboard.yml deleted file mode 100644 index 22fa3c106..000000000 --- a/how-to-use-azureml/track-and-monitor-experiments/tensorboard/export-run-history-to-tensorboard/export-run-history-to-tensorboard.yml +++ /dev/null @@ -1,10 +0,0 @@ -name: export-run-history-to-tensorboard -dependencies: -- pip: - - azureml-sdk - - azureml-tensorboard - - tensorflow - - tqdm - - scipy - - scikit-learn - - setuptools>=41.0.0 diff --git a/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.yml b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.yml index 0aaf52f68..003dd69e2 100644 --- a/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.yml +++ b/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.yml @@ -4,5 +4,4 @@ dependencies: - azureml-sdk - azureml-tensorboard - tensorboard - - tensorflow - setuptools>=41.0.0 diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb b/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb index f19c258b8..896981f03 100644 --- a/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb +++ b/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb @@ -147,7 +147,7 @@ " print('Found existing compute target')\n", "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', \n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='Standard_NC6s_v3', \n", " max_nodes=4)\n", "\n", " # create the cluster\n", diff --git a/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb b/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb index 65097a807..31ebab176 100644 --- a/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb +++ b/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb @@ -121,7 +121,7 @@ "compute_min_nodes = os.environ.get('AML_COMPUTE_CLUSTER_MIN_NODES', 0)\n", "compute_max_nodes = os.environ.get('AML_COMPUTE_CLUSTER_MAX_NODES', 4)\n", "\n", - "# This example uses CPU VM. For using GPU VM, set SKU to STANDARD_NC6\n", + "# This example uses CPU VM. 
For using GPU VM, set SKU to Standard_NC6s_v3\n", "vm_size = os.environ.get('AML_COMPUTE_CLUSTER_SKU', 'STANDARD_D2_V2')\n", "\n", "\n", diff --git a/index.md b/index.md index a32e37276..2aba78db1 100644 --- a/index.md +++ b/index.md @@ -9,74 +9,74 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an |Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags | |:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:| -| [Using Azure ML environments](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training/using-environments/using-environments.ipynb) | Creating and registering environments | None | Local | None | None | None | +| [Using Azure ML environments](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/using-environments/using-environments.ipynb) | Creating and registering environments | None | Local | None | None | None | ## Tutorials |Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags | |:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:| -| [Forecasting BikeShare Demand](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb) | Forecasting | BikeShare | Remote | None | Azure ML AutoML | Forecasting | -| [Forecasting orange juice sales with deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb) | Forecasting | Orange Juice Sales | Remote | Azure Container Instance | Azure ML AutoML | None | -| [Forecasting orange juice sales with deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/auto-ml-forecasting-pipelines.ipynb) | Forecasting | Orange Juice Sales | Remote | Azure Container Instance | Azure ML AutoML | None | -| [Register a model and deploy locally](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local.ipynb) | Deployment | None | Local | Local | None | None | -| :star:[Data drift quickdemo](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/work-with-data/datadrift-tutorial/datadrift-tutorial.ipynb) | Filtering | NOAA | Remote | None | Azure ML | Dataset, Timeseries, Drift | -| :star:[Datasets with ML Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb) | Train | Fashion MNIST | Remote | None | Azure ML | Dataset, Pipeline, Estimator, ScriptRun | -| :star:[Filtering data using Tabular Timeseiries Dataset related API](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.ipynb) | Filtering | NOAA | Local | None | Azure ML | Dataset, Tabular Timeseries | -| :star:[Train with Datasets (Tabular and 
File)](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb) | Train | Iris, Diabetes | Remote | None | Azure ML | Dataset, Estimator, ScriptRun | -| [Forecasting away from training data](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb) | Forecasting | None | Remote | None | Azure ML AutoML | Forecasting, Confidence Intervals | -| [Automated ML run with basic edition features.](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb) | Classification | Bankmarketing | AML | ACI | None | featurization, explainability, remote_run, AutomatedML | -| [Classification of credit card fraudulent transactions using Automated ML](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb) | Classification | Creditcard | AML Compute | None | None | remote_run, AutomatedML | -| [Classification of credit card fraudulent transactions using Automated ML](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/experimental/classification-credit-card-fraud-local-managed/auto-ml-classification-credit-card-fraud-local-managed.ipynb) | Classification | Creditcard | AML Compute | None | None | AutomatedML | -| [Automated ML run with featurization and model explainability.](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb) | Regression | MachineData | AML | ACI | None | featurization, explainability, remote_run, AutomatedML | -| [auto-ml-forecasting-backtest-single-model](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb) | | None | Remote | None | Azure ML AutoML | | -| :star:[Azure Machine Learning Pipeline with DataTranferStep](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-data-transfer.ipynb) | Demonstrates the use of DataTranferStep | Custom | ADF | None | Azure ML | None | -| [Getting Started with Azure Machine Learning Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) | Getting Started notebook for ANML Pipelines | Custom | AML Compute | None | Azure ML | None | -| [Azure Machine Learning Pipeline with AzureBatchStep](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-azurebatch-to-run-a-windows-executable.ipynb) | Demonstrates the use of AzureBatchStep | Custom | Azure Batch | None | Azure ML | None | -| :star:[How to use ModuleStep with AML 
Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-modulestep.ipynb) | Demonstrates the use of ModuleStep | Custom | AML Compute | None | Azure ML | None | -| :star:[How to use Pipeline Drafts to create a Published Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-pipeline-drafts.ipynb) | Demonstrates the use of Pipeline Drafts | Custom | AML Compute | None | Azure ML | None | -| :star:[Azure Machine Learning Pipeline with HyperDriveStep](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-parameter-tuning-with-hyperdrive.ipynb) | Demonstrates the use of HyperDriveStep | Custom | AML Compute | None | Azure ML | None | -| :star:[How to Publish a Pipeline and Invoke the REST endpoint](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-publish-and-run-using-rest-endpoint.ipynb) | Demonstrates the use of Published Pipelines | Custom | AML Compute | None | Azure ML | None | -| :star:[How to Setup a Schedule for a Published Pipeline or Pipeline Endpoint](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb) | Demonstrates the use of Schedules for Published Pipelines and Pipeline endpoints | Custom | AML Compute | None | Azure ML | None | -| [How to setup a versioned Pipeline Endpoint](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-versioned-pipeline-endpoints.ipynb) | Demonstrates the use of PipelineEndpoint to run a specific version of the Published Pipeline | Custom | AML Compute | None | Azure ML | None | -| :star:[How to use DataPath as a PipelineParameter](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb) | Demonstrates the use of DataPath as a PipelineParameter | Custom | AML Compute | None | Azure ML | None | -| :star:[How to use Dataset as a PipelineParameter](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb) | Demonstrates the use of Dataset as a PipelineParameter | Custom | AML Compute | None | Azure ML | None | -| [How to use AdlaStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-adla-as-compute-target.ipynb) | Demonstrates the use of AdlaStep | Custom | Azure Data Lake Analytics | None | Azure ML | None | -| :star:[How to use DatabricksStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb) | Demonstrates the use of DatabricksStep | Custom | Azure Databricks | None | Azure ML, Azure 
Databricks | None | -| :star:[How to use KustoStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-kusto-as-compute-target.ipynb) | Demonstrates the use of KustoStep | Custom | Kusto | None | Azure ML, Kusto | None | -| :star:[How to use AutoMLStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb) | Demonstrates the use of AutoMLStep | Custom | AML Compute | None | Automated Machine Learning | None | -| [Azure Machine Learning Pipeline with CommandStep for R](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep-r.ipynb) | Demonstrates the use of CommandStep for running R scripts | Custom | AML Compute | None | Azure ML | None | -| [Azure Machine Learning Pipeline with CommandStep](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep.ipynb) | Demonstrates the use of CommandStep | Custom | AML Compute | None | Azure ML | None | -| :star:[Azure Machine Learning Pipelines with Data Dependency](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-data-dependency-steps.ipynb) | Demonstrates how to construct a Pipeline with data dependency between steps | Custom | AML Compute | None | Azure ML | None | -| [How to use run a notebook as a step in AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-notebook-runner-step.ipynb) | Demonstrates the use of NotebookRunnerStep | Custom | AML Compute | None | Azure ML | None | -| [Use MLflow with Azure Machine Learning to Train and Deploy Keras Image Classifier](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/using-mlflow/train-and-deploy-keras-auto-logging/train-and-deploy-keras-auto-logging.ipynb) | Use MLflow with Azure Machine Learning to Train and Deploy Keras Image Classifier, leveraging MLflow auto logging | MNIST | Local, AML Compute | Azure Container Instance | Keras | mlflow, keras | -| [Use MLflow with Azure Machine Learning to Train and Deploy PyTorch Image Classifier](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/using-mlflow/train-and-deploy-pytorch/train-and-deploy-pytorch.ipynb) | Use MLflow with Azure Machine Learning to train and deploy PyTorch image classifier model | MNIST | Local, AML Compute | Azure Container Instance | PyTorch | mlflow, pytorch | -| [Use MLflow projects with Azure Machine Learning to train a model with local compute](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-projects-local/train-projects-local.ipynb) | Use MLflow projects with Azure Machine Learning to train a model using local compute | | Local | | ScikitLearn | mlflow, scikit | -| [Use MLflow projects with Azure Machine Learning to train a 
model](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-projects-remote/train-projects-remote.ipynb) | Use MLflow projects with Azure Machine Learning to train a model using azureml compute | | AML Compute | | Scikit | mlflow, scikit | -| [How to use ScriptRun with data input and output](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/work-with-data/datasets-tutorial/scriptrun-with-data-input-output/how-to-use-scriptrun.ipynb) | Demonstrates the use of Scriptrun with datasets | Custom | AML Compute | None | Azure ML | Dataset, ScriptRun | +| [Forecasting BikeShare Demand](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb) | Forecasting | BikeShare | Remote | None | Azure ML AutoML | Forecasting | +| [Forecasting orange juice sales with deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb) | Forecasting | Orange Juice Sales | Remote | Azure Container Instance | Azure ML AutoML | None | +| [Forecasting orange juice sales with deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-pipelines/auto-ml-forecasting-pipelines.ipynb) | Forecasting | Orange Juice Sales | Remote | Azure Container Instance | Azure ML AutoML | None | +| [Register a model and deploy locally](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local.ipynb) | Deployment | None | Local | Local | None | None | +| :star:[Data drift quickdemo](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datadrift-tutorial/datadrift-tutorial.ipynb) | Filtering | NOAA | Remote | None | Azure ML | Dataset, Timeseries, Drift | +| :star:[Datasets with ML Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb) | Train | Fashion MNIST | Remote | None | Azure ML | Dataset, Pipeline, Estimator, ScriptRun | +| :star:[Filtering data using Tabular Timeseiries Dataset related API](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.ipynb) | Filtering | NOAA | Local | None | Azure ML | Dataset, Tabular Timeseries | +| :star:[Train with Datasets (Tabular and File)](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb) | Train | Iris, Diabetes | Remote | None | Azure ML | Dataset, Estimator, ScriptRun | +| [Forecasting away from training data](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb) | Forecasting | None | Remote | None | Azure ML AutoML | Forecasting, Confidence Intervals | +| [Automated ML run with basic edition 
features.](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb) | Classification | Bankmarketing | AML | ACI | None | featurization, explainability, remote_run, AutomatedML | +| [Classification of credit card fraudulent transactions using Automated ML](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb) | Classification | Creditcard | AML Compute | None | None | remote_run, AutomatedML | +| [Classification of credit card fraudulent transactions using Automated ML](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/experimental/classification-credit-card-fraud-local-managed/auto-ml-classification-credit-card-fraud-local-managed.ipynb) | Classification | Creditcard | AML Compute | None | None | AutomatedML | +| [Automated ML run with featurization and model explainability.](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb) | Regression | MachineData | AML | ACI | None | featurization, explainability, remote_run, AutomatedML | +| [auto-ml-forecasting-backtest-single-model](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb) | | None | Remote | None | Azure ML AutoML | | +| :star:[Azure Machine Learning Pipeline with DataTranferStep](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-data-transfer.ipynb) | Demonstrates the use of DataTranferStep | Custom | ADF | None | Azure ML | None | +| [Getting Started with Azure Machine Learning Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) | Getting Started notebook for ANML Pipelines | Custom | AML Compute | None | Azure ML | None | +| [Azure Machine Learning Pipeline with AzureBatchStep](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-azurebatch-to-run-a-windows-executable.ipynb) | Demonstrates the use of AzureBatchStep | Custom | Azure Batch | None | Azure ML | None | +| :star:[How to use ModuleStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-modulestep.ipynb) | Demonstrates the use of ModuleStep | Custom | AML Compute | None | Azure ML | None | +| :star:[How to use Pipeline Drafts to create a Published Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-pipeline-drafts.ipynb) | Demonstrates the use of Pipeline Drafts | Custom | AML Compute | None | Azure ML | None | +| :star:[Azure Machine Learning Pipeline with 
HyperDriveStep](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-parameter-tuning-with-hyperdrive.ipynb) | Demonstrates the use of HyperDriveStep | Custom | AML Compute | None | Azure ML | None | +| :star:[How to Publish a Pipeline and Invoke the REST endpoint](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-publish-and-run-using-rest-endpoint.ipynb) | Demonstrates the use of Published Pipelines | Custom | AML Compute | None | Azure ML | None | +| :star:[How to Setup a Schedule for a Published Pipeline or Pipeline Endpoint](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb) | Demonstrates the use of Schedules for Published Pipelines and Pipeline endpoints | Custom | AML Compute | None | Azure ML | None | +| [How to setup a versioned Pipeline Endpoint](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-versioned-pipeline-endpoints.ipynb) | Demonstrates the use of PipelineEndpoint to run a specific version of the Published Pipeline | Custom | AML Compute | None | Azure ML | None | +| :star:[How to use DataPath as a PipelineParameter](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb) | Demonstrates the use of DataPath as a PipelineParameter | Custom | AML Compute | None | Azure ML | None | +| :star:[How to use Dataset as a PipelineParameter](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb) | Demonstrates the use of Dataset as a PipelineParameter | Custom | AML Compute | None | Azure ML | None | +| [How to use AdlaStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-adla-as-compute-target.ipynb) | Demonstrates the use of AdlaStep | Custom | Azure Data Lake Analytics | None | Azure ML | None | +| :star:[How to use DatabricksStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb) | Demonstrates the use of DatabricksStep | Custom | Azure Databricks | None | Azure ML, Azure Databricks | None | +| :star:[How to use KustoStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-kusto-as-compute-target.ipynb) | Demonstrates the use of KustoStep | Custom | Kusto | None | Azure ML, Kusto | None | +| :star:[How to use AutoMLStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb) | Demonstrates the use of AutoMLStep | Custom | AML Compute | None | 
Automated Machine Learning | None | +| [Azure Machine Learning Pipeline with CommandStep for R](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep-r.ipynb) | Demonstrates the use of CommandStep for running R scripts | Custom | AML Compute | None | Azure ML | None | +| [Azure Machine Learning Pipeline with CommandStep](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep.ipynb) | Demonstrates the use of CommandStep | Custom | AML Compute | None | Azure ML | None | +| :star:[Azure Machine Learning Pipelines with Data Dependency](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-data-dependency-steps.ipynb) | Demonstrates how to construct a Pipeline with data dependency between steps | Custom | AML Compute | None | Azure ML | None | +| [How to run a notebook as a step in AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-notebook-runner-step.ipynb) | Demonstrates the use of NotebookRunnerStep | Custom | AML Compute | None | Azure ML | None | +| [Use MLflow with Azure Machine Learning to Train and Deploy Keras Image Classifier](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/using-mlflow/train-and-deploy-keras-auto-logging/train-and-deploy-keras-auto-logging.ipynb) | Use MLflow with Azure Machine Learning to Train and Deploy Keras Image Classifier, leveraging MLflow auto logging | MNIST | Local, AML Compute | Azure Container Instance | Keras | mlflow, keras | +| [Use MLflow with Azure Machine Learning to Train and Deploy PyTorch Image Classifier](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/using-mlflow/train-and-deploy-pytorch/train-and-deploy-pytorch.ipynb) | Use MLflow with Azure Machine Learning to train and deploy a PyTorch image classifier model | MNIST | Local, AML Compute | Azure Container Instance | PyTorch | mlflow, pytorch | +| [Use MLflow projects with Azure Machine Learning to train a model with local compute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-projects-local/train-projects-local.ipynb) | Use MLflow projects with Azure Machine Learning to train a model using local compute | | Local | | ScikitLearn | mlflow, scikit | +| [Use MLflow projects with Azure Machine Learning to train a model](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-projects-remote/train-projects-remote.ipynb) | Use MLflow projects with Azure Machine Learning to train a model using azureml compute | | AML Compute | | Scikit | mlflow, scikit | +| [How to use ScriptRun with data input and output](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/work-with-data/datasets-tutorial/scriptrun-with-data-input-output/how-to-use-scriptrun.ipynb) | Demonstrates the use of ScriptRun with datasets | Custom | AML Compute | None | Azure ML | Dataset, ScriptRun | ## Training |Title| Task
| Dataset | Training Compute | Deployment Target | ML Framework | Tags | |:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:| -| [Train a model with a custom Docker image](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/fastai/fastai-with-custom-docker/fastai-with-custom-docker.ipynb) | Train with custom Docker image | Oxford IIIT Pet | AML Compute | None | Pytorch | None | -| [Train a DNN using hyperparameter tuning and deploying with Keras](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb) | Create a multi-class classifier | MNIST | AML Compute | Azure Container Instance | TensorFlow | None | -| [Distributed training with PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-distributeddataparallel/distributed-pytorch-with-distributeddataparallel.ipynb) | Train a model using distributed training via PyTorch DistributedDataParallel | CIFAR-10 | AML Compute | None | PyTorch | None | -| [Distributed PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb) | Train a model using the distributed training via Horovod | MNIST | AML Compute | None | PyTorch | None | -| [Training with hyperparameter tuning using PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) | Train an image classification model using transfer learning with the PyTorch estimator | ImageNet | AML Compute | Azure Container Instance | PyTorch | None | -| [Training and hyperparameter tuning with Scikit-learn](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb) | Train a support vector machine (SVM) to perform classification | Iris | AML Compute | None | Scikit-learn | None | -| [Distributed training using TensorFlow with Horovod](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb) | Use the TensorFlow estimator to train a word2vec model | None | AML Compute | None | TensorFlow | None | -| [Hyperparameter tuning and warm start using the TensorFlow estimator](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb) | Train a deep neural network | MNIST | AML Compute | Azure Container Instance | TensorFlow | None | -| [Training and hyperparameter tuning using the TensorFlow estimator](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) | Train a deep neural network | MNIST | AML Compute | Azure Container Instance | 
TensorFlow | None | -| [Resuming a model](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb) | Resume a model in TensorFlow from a previously submitted run | MNIST | AML Compute | None | TensorFlow | None | -| [Using Tensorboard](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/tensorboard/export-run-history-to-tensorboard/export-run-history-to-tensorboard.ipynb) | Export the run history as Tensorboard logs | None | None | None | TensorFlow | None | -| [Training in Spark](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb) | Submiting a run on a spark cluster | None | HDI cluster | None | PySpark | None | -| [Train on Azure Machine Learning Compute](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb) | Submit a run on Azure Machine Learning Compute. | Diabetes | AML Compute | None | None | None | -| [Train on local compute](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training/train-on-local/train-on-local.ipynb) | Train a model locally | Diabetes | Local | None | None | None | -| [Train in a remote Linux virtual machine](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb) | Configure and execute a run | Diabetes | Data Science Virtual Machine | None | None | None | -| [Managing your training runs](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/manage-runs/manage-runs.ipynb) | Monitor and complete runs | None | Local | None | None | None | -| [Tensorboard integration with run history](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.ipynb) | Run a TensorFlow job and view its Tensorboard output live | None | Local, DSVM, AML Compute | None | TensorFlow | None | -| [Use MLflow with AML for a local training run](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-local/train-local.ipynb) | Use MLflow tracking APIs together with Azure Machine Learning for storing your metrics and artifacts | Diabetes | Local | None | None | None | -| [Use MLflow with AML for a remote training run](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-remote/train-remote.ipynb) | Use MLflow tracking APIs together with AML for storing your metrics and artifacts | Diabetes | AML Compute | None | None | None | +| [Train a model with a custom Docker image](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/fastai/fastai-with-custom-docker/fastai-with-custom-docker.ipynb) | Train with custom Docker image | Oxford IIIT Pet | AML Compute | None | PyTorch | None | +| [Train a DNN using hyperparameter tuning and deploying with
Keras](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb) | Create a multi-class classifier | MNIST | AML Compute | Azure Container Instance | TensorFlow | None | +| [Distributed training with PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-distributeddataparallel/distributed-pytorch-with-distributeddataparallel.ipynb) | Train a model using distributed training via PyTorch DistributedDataParallel | CIFAR-10 | AML Compute | None | PyTorch | None | +| [Distributed PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb) | Train a model using distributed training via Horovod | MNIST | AML Compute | None | PyTorch | None | +| [Training with hyperparameter tuning using PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) | Train an image classification model using transfer learning with the PyTorch estimator | ImageNet | AML Compute | Azure Container Instance | PyTorch | None | +| [Training and hyperparameter tuning with Scikit-learn](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb) | Train a support vector machine (SVM) to perform classification | Iris | AML Compute | None | Scikit-learn | None | +| [Distributed training using TensorFlow with Horovod](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb) | Use the TensorFlow estimator to train a word2vec model | None | AML Compute | None | TensorFlow | None | +| [Hyperparameter tuning and warm start using the TensorFlow estimator](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb) | Train a deep neural network | MNIST | AML Compute | Azure Container Instance | TensorFlow | None | +| [Training and hyperparameter tuning using the TensorFlow estimator](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) | Train a deep neural network | MNIST | AML Compute | Azure Container Instance | TensorFlow | None | +| [Resuming a model](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb) | Resume a model in TensorFlow from a previously submitted run | MNIST | AML Compute | None | TensorFlow | None | +| [Using
Tensorboard](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/tensorboard/export-run-history-to-tensorboard/export-run-history-to-tensorboard.ipynb) | Export the run history as Tensorboard logs | None | None | None | TensorFlow | None | +| [Training in Spark](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb) | Submitting a run on a Spark cluster | None | HDI cluster | None | PySpark | None | +| [Train on Azure Machine Learning Compute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb) | Submit a run on Azure Machine Learning Compute. | Diabetes | AML Compute | None | None | None | +| [Train on local compute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-on-local/train-on-local.ipynb) | Train a model locally | Diabetes | Local | None | None | None | +| [Train in a remote Linux virtual machine](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb) | Configure and execute a run | Diabetes | Data Science Virtual Machine | None | None | None | +| [Managing your training runs](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/manage-runs/manage-runs.ipynb) | Monitor and complete runs | None | Local | None | None | None | +| [Tensorboard integration with run history](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.ipynb) | Run a TensorFlow job and view its Tensorboard output live | None | Local, DSVM, AML Compute | None | TensorFlow | None | +| [Use MLflow with AML for a local training run](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-local/train-local.ipynb) | Use MLflow tracking APIs together with Azure Machine Learning for storing your metrics and artifacts | Diabetes | Local | None | None | None | +| [Use MLflow with AML for a remote training run](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-remote/train-remote.ipynb) | Use MLflow tracking APIs together with AML for storing your metrics and artifacts | Diabetes | AML Compute | None | None | None | ## Deployment @@ -84,68 +84,66 @@ Machine Learning notebook samples and encourage efficient retrieval of topics an |Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags | |:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:| -| [Deploy MNIST digit recognition with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.ipynb) | Image Classification | MNIST | Local | Azure Container Instance | ONNX | ONNX Model Zoo | -| [Deploy Facial Expression Recognition (FER+) with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-inference-facial-expression-recognition-deploy.ipynb) | Facial Expression Recognition
| Emotion FER | Local | Azure Container Instance | ONNX | ONNX Model Zoo | -| :star:[Register model and deploy as webservice](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/deploy-to-cloud/model-register-and-deploy.ipynb) | Deploy a model with Azure Machine Learning | Diabetes | None | Azure Container Instance | Scikit-learn | None | -| [Train MNIST in PyTorch, convert, and deploy with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-train-pytorch-aml-deploy-mnist.ipynb) | Image Classification | MNIST | AML Compute | Azure Container Instance | ONNX | ONNX Converter | -| [Deploy ResNet50 with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb) | Image Classification | ImageNet | Local | Azure Container Instance | ONNX | ONNX Model Zoo | -| :star:[Convert and deploy TinyYolo with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-convert-aml-deploy-tinyyolo.ipynb) | Object Detection | PASCAL VOC | local | Azure Container Instance | ONNX | ONNX Converter | -| [Register Spark model and deploy as webservice](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/spark/model-register-and-deploy-spark.ipynb) | | Iris | None | Azure Container Instance | PySpark | | +| [Deploy MNIST digit recognition with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.ipynb) | Image Classification | MNIST | Local | Azure Container Instance | ONNX | ONNX Model Zoo | +| [Deploy Facial Expression Recognition (FER+) with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-inference-facial-expression-recognition-deploy.ipynb) | Facial Expression Recognition | Emotion FER | Local | Azure Container Instance | ONNX | ONNX Model Zoo | +| :star:[Register model and deploy as webservice](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/deploy-to-cloud/model-register-and-deploy.ipynb) | Deploy a model with Azure Machine Learning | Diabetes | None | Azure Container Instance | Scikit-learn | None | +| [Train MNIST in PyTorch, convert, and deploy with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-train-pytorch-aml-deploy-mnist.ipynb) | Image Classification | MNIST | AML Compute | Azure Container Instance | ONNX | ONNX Converter | +| [Deploy ResNet50 with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb) | Image Classification | ImageNet | Local | Azure Container Instance | ONNX | ONNX Model Zoo | +| :star:[Convert and deploy TinyYolo with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-convert-aml-deploy-tinyyolo.ipynb) | Object Detection | PASCAL VOC | local | Azure Container Instance | ONNX | ONNX Converter | ## Other Notebooks |Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags | 
|:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:| -| [DNN Text Featurization](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb) | Text featurization using DNNs for classification | None | AML Compute | None | None | None | +| [DNN Text Featurization](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb) | Text featurization using DNNs for classification | None | AML Compute | None | None | None | | [configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) | | | | | | | -| [fairlearn-azureml-mitigation](https://github.com/Azure/MachineLearningNotebooks/blob/master/contrib/fairness/fairlearn-azureml-mitigation.ipynb) | | | | | | | -| [upload-fairness-dashboard](https://github.com/Azure/MachineLearningNotebooks/blob/master/contrib/fairness/upload-fairness-dashboard.ipynb) | | | | | | | -| [azure-ml-with-nvidia-rapids](https://github.com/Azure/MachineLearningNotebooks/blob/master/contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb) | | | | | | | -| [auto-ml-continuous-retraining](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb) | | | | | | | -| [codegen-for-autofeaturization](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-codegen/codegen-for-autofeaturization.ipynb) | | | | | | | -| [custom-model-training-from-autofeaturization-run](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-custom-model-training/custom-model-training-from-autofeaturization-run.ipynb) | | | | | | | -| [auto-ml-regression-model-proxy](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb) | | | | | | | -| [auto-ml-forecasting-backtest-many-models](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb) | | | | | | | -| [auto-ml-forecasting-energy-demand](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb) | | | | | | | -| [auto-ml-forecasting-github-dau](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-github-dau/auto-ml-forecasting-github-dau.ipynb) | | | | | | | -| [auto-ml-forecasting-hierarchical-timeseries](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.ipynb) | | | | | | | -| 
[auto-ml-forecasting-many-models](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-many-models/auto-ml-forecasting-many-models.ipynb) | | | | | | | -| [auto-ml-forecasting-univariate-recipe-experiment-settings](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-experiment-settings.ipynb) | | | | | | | -| [auto-ml-forecasting-univariate-recipe-run-experiment](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-run-experiment.ipynb) | | | | | | | -| [auto-ml-regression](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb) | | | | | | | -| [automl-databricks-local-01](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-databricks/automl/automl-databricks-local-01.ipynb) | | | | | | | -| [automl-databricks-local-with-deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-databricks/automl/automl-databricks-local-with-deployment.ipynb) | | | | | | | -| [spark_job_on_synapse_spark_pool](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-synapse/spark_job_on_synapse_spark_pool.ipynb) | | | | | | | -| [spark_session_on_synapse_spark_pool](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-synapse/spark_session_on_synapse_spark_pool.ipynb) | | | | | | | -| [Synapse_Job_Scala_Support](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-synapse/Synapse_Job_Scala_Support.ipynb) | | | | | | | -| [Synapse_Session_Scala_Support](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-synapse/Synapse_Session_Scala_Support.ipynb) | | | | | | | -| [multi-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/deploy-multi-model/multi-model-register-and-deploy.ipynb) | | | | | | | -| [register-model-deploy-local-advanced](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local-advanced.ipynb) | | | | | | | -| [enable-app-insights-in-production-service](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb) | | | | | | | -| [onnx-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-model-register-and-deploy.ipynb) | | | | | | | -| [production-deploy-to-aks-ssl](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks-ssl.ipynb) | | | | | | | -| [production-deploy-to-aks](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb) | | | | | | | -| 
[production-deploy-to-aks-gpu](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.ipynb) | | | | | | | -| [train-explain-model-gpu-tree-explainer](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/explain-model/azure-integration/gpu-explanation/train-explain-model-gpu-tree-explainer.ipynb) | | | | | | | -| [explain-model-on-amlcompute](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb) | | | | | | | -| [save-retrieve-explanations-run-history](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.ipynb) | | | | | | | -| [train-explain-model-locally-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.ipynb) | | | | | | | -| [train-explain-model-on-amlcompute-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb) | | | | | | | -| [training_notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/notebook_runner/training_notebook.ipynb) | | | | | | | -| [nyc-taxi-data-regression-model-building](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb) | | | | | | | -| [authentication-in-azureml](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/manage-azureml-service/authentication-in-azureml/authentication-in-azureml.ipynb) | | | | | | | -| [pong_rllib](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb) | | | | | | | -| [cartpole_ci](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb) | | | | | | | -| [cartpole_sc](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb) | | | | | | | -| [rai-loan-decision](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/responsible-ai/visualize-upload-loan-decision/rai-loan-decision.ipynb) | | | | | | | -| [Logging APIs](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb) | Logging APIs and analyzing results | None | None | None | None | None | -| [configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master/setup-environment/configuration.ipynb) | | | | | | | -| [quickstart-azureml-automl](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/compute-instance-quickstarts/quickstart-azureml-automl/quickstart-azureml-automl.ipynb) | | | | | | | -| 
[quickstart-azureml-in-10mins](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/quickstart-azureml-in-10mins.ipynb) | | | | | | | -| [quickstart-azureml-python-sdk](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.ipynb) | | | | | | | -| [tutorial-1st-experiment-sdk-train](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb) | | | | | | | -| [img-classification-part1-training](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb) | | | | | | | -| [img-classification-part2-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/image-classification-mnist-data/img-classification-part2-deploy.ipynb) | | | | | | | -| [img-classification-part3-deploy-encrypted](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/image-classification-mnist-data/img-classification-part3-deploy-encrypted.ipynb) | | | | | | | -| [tutorial-pipeline-batch-scoring-classification](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb) | | | | | | | -| [regression-automated-ml](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/regression-automl-nyc-taxi-data/regression-automated-ml.ipynb) | | | | | | | +| [fairlearn-azureml-mitigation](https://github.com/Azure/MachineLearningNotebooks/blob/master//contrib/fairness/fairlearn-azureml-mitigation.ipynb) | | | | | | | +| [upload-fairness-dashboard](https://github.com/Azure/MachineLearningNotebooks/blob/master//contrib/fairness/upload-fairness-dashboard.ipynb) | | | | | | | +| [azure-ml-with-nvidia-rapids](https://github.com/Azure/MachineLearningNotebooks/blob/master//contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb) | | | | | | | +| [auto-ml-continuous-retraining](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb) | | | | | | | +| [codegen-for-autofeaturization](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-codegen/codegen-for-autofeaturization.ipynb) | | | | | | | +| [custom-model-training-from-autofeaturization-run](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-custom-model-training/custom-model-training-from-autofeaturization-run.ipynb) | | | | | | | +| [auto-ml-regression-model-proxy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb) | | | | | | | +| [auto-ml-forecasting-backtest-many-models](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb) | | | | | 
| | +| [auto-ml-forecasting-energy-demand](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb) | | | | | | | +| [auto-ml-forecasting-github-dau](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-github-dau/auto-ml-forecasting-github-dau.ipynb) | | | | | | | +| [auto-ml-forecasting-hierarchical-timeseries](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.ipynb) | | | | | | | +| [auto-ml-forecasting-many-models](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-many-models/auto-ml-forecasting-many-models.ipynb) | | | | | | | +| [auto-ml-forecasting-univariate-recipe-experiment-settings](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-experiment-settings.ipynb) | | | | | | | +| [auto-ml-forecasting-univariate-recipe-run-experiment](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-run-experiment.ipynb) | | | | | | | +| [auto-ml-regression](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb) | | | | | | | +| [automl-databricks-local-01](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/automl/automl-databricks-local-01.ipynb) | | | | | | | +| [automl-databricks-local-with-deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-databricks/automl/automl-databricks-local-with-deployment.ipynb) | | | | | | | +| [spark_job_on_synapse_spark_pool](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-synapse/spark_job_on_synapse_spark_pool.ipynb) | | | | | | | +| [spark_session_on_synapse_spark_pool](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-synapse/spark_session_on_synapse_spark_pool.ipynb) | | | | | | | +| [Synapse_Job_Scala_Support](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-synapse/Synapse_Job_Scala_Support.ipynb) | | | | | | | +| [Synapse_Session_Scala_Support](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/azure-synapse/Synapse_Session_Scala_Support.ipynb) | | | | | | | +| [multi-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/deploy-multi-model/multi-model-register-and-deploy.ipynb) | | | | | | | +| [register-model-deploy-local-advanced](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local-advanced.ipynb) | | | | | | | +| 
[enable-app-insights-in-production-service](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb) | | | | | | | +| [onnx-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/onnx/onnx-model-register-and-deploy.ipynb) | | | | | | | +| [production-deploy-to-aks-ssl](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks-ssl.ipynb) | | | | | | | +| [production-deploy-to-aks](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb) | | | | | | | +| [production-deploy-to-aks-gpu](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.ipynb) | | | | | | | +| [explain-model-on-amlcompute](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb) | | | | | | | +| [save-retrieve-explanations-run-history](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.ipynb) | | | | | | | +| [train-explain-model-locally-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.ipynb) | | | | | | | +| [train-explain-model-on-amlcompute-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb) | | | | | | | +| [training_notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/notebook_runner/training_notebook.ipynb) | | | | | | | +| [nyc-taxi-data-regression-model-building](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb) | | | | | | | +| [authentication-in-azureml](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/manage-azureml-service/authentication-in-azureml/authentication-in-azureml.ipynb) | | | | | | | +| [pong_rllib](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb) | | | | | | | +| [cartpole_ci](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb) | | | | | | | +| [cartpole_sc](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb) | | | | | | | +| 
[rai-loan-decision](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/responsible-ai/visualize-upload-loan-decision/rai-loan-decision.ipynb) | | | | | | | +| [Logging APIs](https://github.com/Azure/MachineLearningNotebooks/blob/master//how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb) | Logging APIs and analyzing results | None | None | None | None | None | +| [configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master//setup-environment/configuration.ipynb) | | | | | | | +| [quickstart-azureml-automl](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/compute-instance-quickstarts/quickstart-azureml-automl/quickstart-azureml-automl.ipynb) | | | | | | | +| [quickstart-azureml-in-10mins](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/quickstart-azureml-in-10mins.ipynb) | | | | | | | +| [quickstart-azureml-python-sdk](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.ipynb) | | | | | | | +| [tutorial-1st-experiment-sdk-train](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb) | | | | | | | +| [img-classification-part1-training](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb) | | | | | | | +| [img-classification-part2-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/image-classification-mnist-data/img-classification-part2-deploy.ipynb) | | | | | | | +| [img-classification-part3-deploy-encrypted](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/image-classification-mnist-data/img-classification-part3-deploy-encrypted.ipynb) | | | | | | | +| [tutorial-pipeline-batch-scoring-classification](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb) | | | | | | | +| [regression-automated-ml](https://github.com/Azure/MachineLearningNotebooks/blob/master//tutorials/regression-automl-nyc-taxi-data/regression-automated-ml.ipynb) | | | | | | | diff --git a/setup-environment/configuration.ipynb b/setup-environment/configuration.ipynb index af26de079..596674cbb 100644 --- a/setup-environment/configuration.ipynb +++ b/setup-environment/configuration.ipynb @@ -102,7 +102,7 @@ "source": [ "import azureml.core\n", "\n", - "print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n", + "print(\"This notebook was created using version 1.53.0 of the Azure ML SDK\")\n", "print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")" ] }, diff --git a/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb b/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb index a12877d5f..0dff96604 100644 --- a/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb +++ b/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb @@ -157,7 +157,7 @@ "compute_min_nodes = 
os.environ.get(\"AML_COMPUTE_CLUSTER_MIN_NODES\", 0)\n", "compute_max_nodes = os.environ.get(\"AML_COMPUTE_CLUSTER_MAX_NODES\", 4)\n", "\n", - "# This example uses CPU VM. For using GPU VM, set SKU to STANDARD_NC6\n", + "# This example uses a CPU VM. To use a GPU VM, set the SKU to Standard_NC6s_v3\n", "vm_size = os.environ.get(\"AML_COMPUTE_CLUSTER_SKU\", \"STANDARD_D2_V2\")\n", "\n", "\n", diff --git a/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb b/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb index 9f7c94512..5448a6401 100644 --- a/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb +++ b/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb @@ -244,7 +244,7 @@ "try:\n", " compute_target = ComputeTarget(workspace=ws, name=compute_name)\n", "except ComputeTargetException:\n", - " config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_NC6\",\n", + " config = AmlCompute.provisioning_configuration(vm_size=\"Standard_NC6s_v3\",\n", " vm_priority=\"lowpriority\", \n", " min_nodes=0, \n", " max_nodes=2)\n", From 280150713e069a57003829e9fa47d52cb46bf2a7 Mon Sep 17 00:00:00 2001 From: Jeff Shepherd Date: Wed, 23 Aug 2023 10:20:25 -0700 Subject: [PATCH 2/4] Restored V2 message --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d9ebd8206..d3c0581dc 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Azure Machine Learning Python SDK notebooks -> a community-driven repository of examples using mlflow for tracking can be found at https://github.com/Azure/azureml-examples +### **With the introduction of AzureML SDK v2, this samples repository for the v1 SDK is now deprecated and will not be monitored or updated. Users are encouraged to visit the [v2 SDK samples repository](https://github.com/Azure/azureml-examples) instead for up-to-date and enhanced examples of how to build, train, and deploy machine learning models with AzureML's newest features.** Welcome to the Azure Machine Learning Python SDK notebooks repository!
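Reviewer note: both notebook hunks in PATCH 1/4 make the same substitution inside the same v1-SDK provisioning pattern, swapping the retired STANDARD_NC6 SKU for Standard_NC6s_v3. To exercise the updated SKU end to end, here is a minimal, self-contained sketch of that pattern. It assumes the v1 `azureml-core` package and a workspace `config.json` on disk; the cluster name `gpu-cluster` is illustrative and not taken from the patch.

```python
# Sketch of the AmlCompute provisioning pattern the hunks above modify (v1 SDK).
from azureml.core import Workspace
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException

ws = Workspace.from_config()   # assumes a config.json in the working tree
compute_name = "gpu-cluster"   # hypothetical name, not from the patch

try:
    # Reuse the cluster if it already exists in the workspace.
    compute_target = ComputeTarget(workspace=ws, name=compute_name)
except ComputeTargetException:
    # Otherwise provision it with the SKU this release moves to.
    config = AmlCompute.provisioning_configuration(
        vm_size="Standard_NC6s_v3",  # replaces the retired STANDARD_NC6
        vm_priority="lowpriority",   # cheaper, preemptible nodes
        min_nodes=0,                 # scale to zero when idle
        max_nodes=2,
    )
    compute_target = ComputeTarget.create(ws, compute_name, config)
    compute_target.wait_for_completion(show_output=True)
```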
From 0c5f6daf52f86a7631d24901718e67850583b9f7 Mon Sep 17 00:00:00 2001 From: Jeff Shepherd Date: Wed, 23 Aug 2023 11:37:30 -0700 Subject: [PATCH 4/4] Fixed readme syntax --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index bfced82d8..ecddfe6a1 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,9 @@ # Azure Machine Learning Python SDK notebooks + ** **With the introduction of AzureML SDK v2, this samples repository for the v1 SDK is now deprecated and will not be monitored or updated. Users are encouraged to visit the [v2 SDK samples repository](https://github.com/Azure/azureml-examples) instead for up-to-date and enhanced examples of how to build, train, and deploy machine learning models with AzureML's newest features.** ** + Welcome to the Azure Machine Learning Python SDK notebooks repository! ## Getting started