Skip to content

Commit b54b256

Browse files
authored
Merge pull request Azure#667 from Azure/sdk-codetest
remove deprecated auto_prepare_environment
2 parents d658c85 + 57b0f70 commit b54b256

File tree

19 files changed

+852
-0
lines changed

19 files changed

+852
-0
lines changed

.amlignore

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
.ipynb_checkpoints
2+
azureml-logs
3+
.azureml
4+
.git
5+
outputs
6+
azureml-setup
7+
docs

.vscode/settings.json

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
{
2+
"python.pythonPath": "C:\\Users\\sgilley\\.azureml\\envs\\jan3\\python.exe"
3+
}

aml_config/conda_dependencies.yml

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
# Conda environment specification. The dependencies defined in this file will
2+
# be automatically provisioned for runs with userManagedDependencies=False.
3+
4+
# Details about the Conda environment file format:
5+
# https://conda.io/docs/user-guide/tasks/manage-environments.html#create-env-file-manually
6+
7+
name: project_environment
8+
dependencies:
9+
# The python interpreter version.
10+
# Currently Azure ML only supports 3.5.2 and later.
11+
- python=3.6.2
12+
13+
- pip:
14+
# Required packages for AzureML execution, history, and data preparation.
15+
- azureml-defaults

aml_config/docker.runconfig

Lines changed: 115 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,115 @@
1+
# The script to run.
2+
script: train.py
3+
# The arguments to the script file.
4+
arguments: []
5+
# The name of the compute target to use for this run.
6+
target: local
7+
# Framework to execute inside. Allowed values are "Python" , "PySpark", "CNTK", "TensorFlow", and "PyTorch".
8+
framework: PySpark
9+
# Communicator for the given framework. Allowed values are "None" , "ParameterServer", "OpenMpi", and "IntelMpi".
10+
communicator: None
11+
# Automatically prepare the run environment as part of the run itself.
12+
autoPrepareEnvironment: true
13+
# Maximum allowed duration for the run.
14+
maxRunDurationSeconds:
15+
# Number of nodes to use for running job.
16+
nodeCount: 1
17+
# Environment details.
18+
environment:
19+
# Environment variables set for the run.
20+
environmentVariables:
21+
EXAMPLE_ENV_VAR: EXAMPLE_VALUE
22+
# Python details
23+
python:
24+
# user_managed_dependencies=True indicates that the environment will be user managed. False indicates that AzureML will manage the user environment.
25+
userManagedDependencies: false
26+
# The python interpreter path
27+
interpreterPath: python
28+
# Path to the conda dependencies file to use for this run. If a project
29+
# contains multiple programs with different sets of dependencies, it may be
30+
# convenient to manage those environments with separate files.
31+
condaDependenciesFile: aml_config/conda_dependencies.yml
32+
# Docker details
33+
docker:
34+
# Set True to perform this run inside a Docker container.
35+
enabled: true
36+
# Base image used for Docker-based runs.
37+
baseImage: mcr.microsoft.com/azureml/base:0.2.0
38+
# Set False if necessary to work around shared volume bugs.
39+
sharedVolumes: true
40+
# Run with NVidia Docker extension to support GPUs.
41+
gpuSupport: false
42+
# Extra arguments to the Docker run command.
43+
arguments: []
44+
# Image registry that contains the base image.
45+
baseImageRegistry:
46+
# DNS name or IP address of azure container registry (ACR)
47+
address:
48+
# The username for ACR
49+
username:
50+
# The password for ACR
51+
password:
52+
# Spark details
53+
spark:
54+
# List of spark repositories.
55+
repositories:
56+
- https://mmlspark.azureedge.net/maven
57+
packages:
58+
- group: com.microsoft.ml.spark
59+
artifact: mmlspark_2.11
60+
version: '0.12'
61+
precachePackages: true
62+
# Databricks details
63+
databricks:
64+
# List of maven libraries.
65+
mavenLibraries: []
66+
# List of PyPi libraries
67+
pypiLibraries: []
68+
# List of RCran libraries
69+
rcranLibraries: []
70+
# List of JAR libraries
71+
jarLibraries: []
72+
# List of Egg libraries
73+
eggLibraries: []
74+
# History details.
75+
history:
76+
# Enable history tracking -- this allows status, logs, metrics, and outputs
77+
# to be collected for a run.
78+
outputCollection: true
79+
# whether to take snapshots for history.
80+
snapshotProject: true
81+
# Spark configuration details.
82+
spark:
83+
configuration:
84+
spark.app.name: Azure ML Experiment
85+
spark.yarn.maxAppAttempts: 1
86+
# HDI details.
87+
hdi:
88+
# Yarn deploy mode. Options are cluster and client.
89+
yarnDeployMode: cluster
90+
# Tensorflow details.
91+
tensorflow:
92+
# The number of worker tasks.
93+
workerCount: 1
94+
# The number of parameter server tasks.
95+
parameterServerCount: 1
96+
# Mpi details.
97+
mpi:
98+
# When using MPI, number of processes per node.
99+
processCountPerNode: 1
100+
# data reference configuration details
101+
dataReferences: {}
102+
# Project share datastore reference.
103+
sourceDirectoryDataStore:
104+
# AmlCompute details.
105+
amlcompute:
106+
# VM size of the Cluster to be created. Allowed values are Azure vm sizes. The list of vm sizes is available in 'https://docs.microsoft.com/en-us/azure/cloud-services/cloud-services-sizes-specs'
107+
vmSize:
108+
# VM priority of the Cluster to be created. Allowed values are "dedicated" , "lowpriority".
109+
vmPriority:
110+
# A bool that indicates if the cluster has to be retained after job completion.
111+
retainCluster: false
112+
# Name of the cluster to be created. If not specified, runId will be used as cluster name.
113+
name:
114+
# Maximum number of nodes in the AmlCompute cluster to be created. Minimum number of nodes will always be set to 0.
115+
clusterMaxNodeCount: 1

aml_config/local.runconfig

Lines changed: 115 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,115 @@
1+
# The script to run.
2+
script: train.py
3+
# The arguments to the script file.
4+
arguments: []
5+
# The name of the compute target to use for this run.
6+
target: local
7+
# Framework to execute inside. Allowed values are "Python" , "PySpark", "CNTK", "TensorFlow", and "PyTorch".
8+
framework: Python
9+
# Communicator for the given framework. Allowed values are "None" , "ParameterServer", "OpenMpi", and "IntelMpi".
10+
communicator: None
11+
# Automatically prepare the run environment as part of the run itself.
12+
autoPrepareEnvironment: true
13+
# Maximum allowed duration for the run.
14+
maxRunDurationSeconds:
15+
# Number of nodes to use for running job.
16+
nodeCount: 1
17+
# Environment details.
18+
environment:
19+
# Environment variables set for the run.
20+
environmentVariables:
21+
EXAMPLE_ENV_VAR: EXAMPLE_VALUE
22+
# Python details
23+
python:
24+
# user_managed_dependencies=True indicates that the environment will be user managed. False indicates that AzureML will manage the user environment.
25+
userManagedDependencies: false
26+
# The python interpreter path
27+
interpreterPath: python
28+
# Path to the conda dependencies file to use for this run. If a project
29+
# contains multiple programs with different sets of dependencies, it may be
30+
# convenient to manage those environments with separate files.
31+
condaDependenciesFile: aml_config/conda_dependencies.yml
32+
# Docker details
33+
docker:
34+
# Set True to perform this run inside a Docker container.
35+
enabled: false
36+
# Base image used for Docker-based runs.
37+
baseImage: mcr.microsoft.com/azureml/base:0.2.0
38+
# Set False if necessary to work around shared volume bugs.
39+
sharedVolumes: true
40+
# Run with NVidia Docker extension to support GPUs.
41+
gpuSupport: false
42+
# Extra arguments to the Docker run command.
43+
arguments: []
44+
# Image registry that contains the base image.
45+
baseImageRegistry:
46+
# DNS name or IP address of azure container registry (ACR)
47+
address:
48+
# The username for ACR
49+
username:
50+
# The password for ACR
51+
password:
52+
# Spark details
53+
spark:
54+
# List of spark repositories.
55+
repositories:
56+
- https://mmlspark.azureedge.net/maven
57+
packages:
58+
- group: com.microsoft.ml.spark
59+
artifact: mmlspark_2.11
60+
version: '0.12'
61+
precachePackages: true
62+
# Databricks details
63+
databricks:
64+
# List of maven libraries.
65+
mavenLibraries: []
66+
# List of PyPi libraries
67+
pypiLibraries: []
68+
# List of RCran libraries
69+
rcranLibraries: []
70+
# List of JAR libraries
71+
jarLibraries: []
72+
# List of Egg libraries
73+
eggLibraries: []
74+
# History details.
75+
history:
76+
# Enable history tracking -- this allows status, logs, metrics, and outputs
77+
# to be collected for a run.
78+
outputCollection: true
79+
# whether to take snapshots for history.
80+
snapshotProject: true
81+
# Spark configuration details.
82+
spark:
83+
configuration:
84+
spark.app.name: Azure ML Experiment
85+
spark.yarn.maxAppAttempts: 1
86+
# HDI details.
87+
hdi:
88+
# Yarn deploy mode. Options are cluster and client.
89+
yarnDeployMode: cluster
90+
# Tensorflow details.
91+
tensorflow:
92+
# The number of worker tasks.
93+
workerCount: 1
94+
# The number of parameter server tasks.
95+
parameterServerCount: 1
96+
# Mpi details.
97+
mpi:
98+
# When using MPI, number of processes per node.
99+
processCountPerNode: 1
100+
# data reference configuration details
101+
dataReferences: {}
102+
# Project share datastore reference.
103+
sourceDirectoryDataStore:
104+
# AmlCompute details.
105+
amlcompute:
106+
# VM size of the Cluster to be created. Allowed values are Azure vm sizes. The list of vm sizes is available in 'https://docs.microsoft.com/en-us/azure/cloud-services/cloud-services-sizes-specs'
107+
vmSize:
108+
# VM priority of the Cluster to be created. Allowed values are "dedicated" , "lowpriority".
109+
vmPriority:
110+
# A bool that indicates if the cluster has to be retained after job completion.
111+
retainCluster: false
112+
# Name of the cluster to be created. If not specified, runId will be used as cluster name.
113+
name:
114+
# Maximum number of nodes in the AmlCompute cluster to be created. Minimum number of nodes will always be set to 0.
115+
clusterMaxNodeCount: 1

aml_config/project.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
{"Id": "local-compute", "Scope": "/subscriptions/65a1016d-0f67-45d2-b838-b8f373d6d52e/resourceGroups/sheri/providers/Microsoft.MachineLearningServices/workspaces/sheritestqs3/projects/local-compute"}

0 commit comments

Comments
 (0)