|
210 | 210 | "source": [ |
211 | 211 | "## Create compute resources for your training experiments\n", |
212 | 212 | "\n", |
213 | | - "Many of the subsequent examples use BatchAI clusters to train models at scale. To create a **CPU** cluster now, run the cell below. The autoscale settings mean that the cluster will scale down to 0 nodes when inactive and up to 8 nodes when busy." |
| 213 | + "Many of the subsequent examples use BatchAI clusters to train models at scale. To create a **CPU** cluster now, run the cell below. The autoscale settings mean that the cluster will scale down to 0 nodes when inactive and up to 4 nodes when busy." |
214 | 214 | ] |
215 | 215 | }, |
216 | 216 | { |
|
233 | 233 | " compute_config = BatchAiCompute.provisioning_configuration(vm_size='STANDARD_D2_V2', \n", |
234 | 234 | " autoscale_enabled=True,\n", |
235 | 235 | " cluster_min_nodes=0, \n", |
236 | | - " cluster_max_nodes=6)\n", |
| 236 | + " cluster_max_nodes=4)\n", |
237 | 237 | " cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n", |
238 | 238 | "\n", |
239 | 239 | "cpu_cluster.wait_for_completion(show_output=True)" |
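
For context, this hunk only touches the `provisioning_configuration` call; the surrounding cell in the notebook also imports the compute classes and reuses an existing cluster if one is present. Below is a minimal sketch of what the full cell looks like with the new `cluster_max_nodes=4` value, assuming a `Workspace` object `ws` was created earlier in the notebook and that the cluster name `cpucluster` is a placeholder (the notebook may use a different name).

```python
# Sketch of the full CPU-cluster cell, assuming `ws` is an azureml.core.Workspace
# created earlier in the notebook (e.g. via Workspace.from_config()).
from azureml.core.compute import ComputeTarget, BatchAiCompute
from azureml.core.compute_target import ComputeTargetException

cpu_cluster_name = "cpucluster"  # hypothetical name for illustration

try:
    # Reuse the cluster if it already exists in the workspace.
    cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
    print("Found existing cluster, using it.")
except ComputeTargetException:
    # Provision a new autoscaling BatchAI cluster: 0 nodes when idle, up to 4 when busy.
    compute_config = BatchAiCompute.provisioning_configuration(vm_size="STANDARD_D2_V2",
                                                               autoscale_enabled=True,
                                                               cluster_min_nodes=0,
                                                               cluster_max_nodes=4)
    cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)

cpu_cluster.wait_for_completion(show_output=True)
```

The GPU hunk below follows the same pattern, differing only in the VM size (`STANDARD_NC6`) and the cluster variable/name; lowering `cluster_max_nodes` from 6 to 4 in both places keeps the code consistent with the updated markdown, which now also says 4 nodes.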
|
266 | 266 | " compute_config = BatchAiCompute.provisioning_configuration(vm_size='STANDARD_NC6', \n", |
267 | 267 | " autoscale_enabled=True,\n", |
268 | 268 | " cluster_min_nodes=0, \n", |
269 | | - " cluster_max_nodes=6)\n", |
| 269 | + " cluster_max_nodes=4)\n", |
270 | 270 | " gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, compute_config)\n", |
271 | 271 | "\n", |
272 | 272 | "gpu_cluster.wait_for_completion(show_output=True)" |
|