|
| 1 | +# The script to run. |
| 2 | +script: train.py |
| 3 | +# The arguments to the script file. |
| 4 | +arguments: [] |
| 5 | +# The name of the compute target to use for this run. |
| 6 | +target: local |
| 7 | +# Framework to execute inside. Allowed values are "Python", "PySpark", "CNTK", "TensorFlow", and "PyTorch". |
| 8 | +framework: PySpark |
| 9 | +# Communicator for the given framework. Allowed values are "None", "ParameterServer", "OpenMpi", and "IntelMpi". |
| 10 | +communicator: None |
| 11 | +# Automatically prepare the run environment as part of the run itself. |
| 12 | +autoPrepareEnvironment: true |
| 13 | +# Maximum allowed duration for the run. |
| 14 | +maxRunDurationSeconds: |
| 15 | +# Number of nodes to use for running job. |
| 16 | +nodeCount: 1 |
| 17 | +# Environment details. |
| 18 | +environment: |
| 19 | +# Environment variables set for the run. |
| 20 | + environmentVariables: |
| 21 | + EXAMPLE_ENV_VAR: EXAMPLE_VALUE |
| 22 | +# Python details |
| 23 | + python: |
| 24 | +# user_managed_dependencies=True indicates that the environment will be user managed. False indicates that AzureML will manage the user environment. |
| 25 | + userManagedDependencies: false |
| 26 | +# The python interpreter path |
| 27 | + interpreterPath: python |
| 28 | +# Path to the conda dependencies file to use for this run. If a project |
| 29 | +# contains multiple programs with different sets of dependencies, it may be |
| 30 | +# convenient to manage those environments with separate files. |
| 31 | + condaDependenciesFile: aml_config/conda_dependencies.yml |
| 32 | +# Docker details |
| 33 | + docker: |
| 34 | +# Set True to perform this run inside a Docker container. |
| 35 | + enabled: true |
| 36 | +# Base image used for Docker-based runs. |
| 37 | + baseImage: mcr.microsoft.com/azureml/base:0.2.0 |
| 38 | +# Set False if necessary to work around shared volume bugs. |
| 39 | + sharedVolumes: true |
| 40 | +# Run with NVidia Docker extension to support GPUs. |
| 41 | + gpuSupport: false |
| 42 | +# Extra arguments to the Docker run command. |
| 43 | + arguments: [] |
| 44 | +# Image registry that contains the base image. |
| 45 | + baseImageRegistry: |
| 46 | +# DNS name or IP address of the Azure Container Registry (ACR) |
| 47 | + address: |
| 48 | +# The username for ACR |
| 49 | + username: |
| 50 | +# The password for ACR |
| 51 | + password: |
| 52 | +# Spark details |
| 53 | + spark: |
| 54 | +# List of spark repositories. |
| 55 | + repositories: |
| 56 | + - https://mmlspark.azureedge.net/maven |
| 57 | + packages: |
| 58 | + - group: com.microsoft.ml.spark |
| 59 | + artifact: mmlspark_2.11 |
| 60 | + version: '0.12' |
| 61 | + precachePackages: true |
| 62 | +# Databricks details |
| 63 | + databricks: |
| 64 | +# List of maven libraries. |
| 65 | + mavenLibraries: [] |
| 66 | +# List of PyPi libraries |
| 67 | + pypiLibraries: [] |
| 68 | +# List of RCran libraries |
| 69 | + rcranLibraries: [] |
| 70 | +# List of JAR libraries |
| 71 | + jarLibraries: [] |
| 72 | +# List of Egg libraries |
| 73 | + eggLibraries: [] |
| 74 | +# History details. |
| 75 | +history: |
| 76 | +# Enable history tracking -- this allows status, logs, metrics, and outputs |
| 77 | +# to be collected for a run. |
| 78 | + outputCollection: true |
| 79 | +# whether to take snapshots for history. |
| 80 | + snapshotProject: true |
| 81 | +# Spark configuration details. |
| 82 | +spark: |
| 83 | + configuration: |
| 84 | + spark.app.name: Azure ML Experiment |
| 85 | + spark.yarn.maxAppAttempts: 1 |
| 86 | +# HDI details. |
| 87 | +hdi: |
| 88 | +# Yarn deploy mode. Options are cluster and client. |
| 89 | + yarnDeployMode: cluster |
| 90 | +# Tensorflow details. |
| 91 | +tensorflow: |
| 92 | +# The number of worker tasks. |
| 93 | + workerCount: 1 |
| 94 | +# The number of parameter server tasks. |
| 95 | + parameterServerCount: 1 |
| 96 | +# Mpi details. |
| 97 | +mpi: |
| 98 | +# When using MPI, number of processes per node. |
| 99 | + processCountPerNode: 1 |
| 100 | +# data reference configuration details |
| 101 | +dataReferences: {} |
| 102 | +# Project share datastore reference. |
| 103 | +sourceDirectoryDataStore: |
| 104 | +# AmlCompute details. |
| 105 | +amlcompute: |
| 106 | +# VM size of the cluster to be created. Allowed values are Azure VM sizes. The list of VM sizes is available at https://docs.microsoft.com/en-us/azure/cloud-services/cloud-services-sizes-specs |
| 107 | + vmSize: |
| 108 | +# VM priority of the cluster to be created. Allowed values are "dedicated" and "lowpriority". |
| 109 | + vmPriority: |
| 110 | +# A bool that indicates if the cluster has to be retained after job completion. |
| 111 | + retainCluster: false |
| 112 | +# Name of the cluster to be created. If not specified, runId will be used as cluster name. |
| 113 | + name: |
| 114 | +# Maximum number of nodes in the AmlCompute cluster to be created. Minimum number of nodes will always be set to 0. |
| 115 | + clusterMaxNodeCount: 1 |
0 commit comments