diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_internal/entities/component.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_internal/entities/component.py
index 7e91c0830008..b255da409a65 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_internal/entities/component.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_internal/entities/component.py
@@ -115,7 +115,7 @@ def __init__(
             tags=tags,
             properties=properties,
             display_name=display_name,
-            is_deterministic=is_deterministic,
+            is_deterministic=is_deterministic,  # type: ignore[arg-type]
             inputs=inputs,
             outputs=outputs,
             yaml_str=yaml_str,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_local_endpoints/validators/code_validator.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_local_endpoints/validators/code_validator.py
index 3e0fd6f2c7c3..bbe7c9717665 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/_local_endpoints/validators/code_validator.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_local_endpoints/validators/code_validator.py
@@ -88,15 +88,18 @@ def _local_scoring_script_is_valid(deployment: OnlineDeployment):
 
 def _code_configuration_contains_cloud_artifacts(deployment: OnlineDeployment):
     # If the deployment.code_configuration.code is a string, then it is the cloud code artifact name or full arm ID
-    return isinstance(deployment.code_configuration.code, str) and (
-        is_url(deployment.code_configuration.code) or deployment.code_configuration.code.startswith(ARM_ID_PREFIX)
+    return isinstance(deployment.code_configuration.code, str) and (  # type: ignore[union-attr]
+        is_url(deployment.code_configuration.code)  # type: ignore[union-attr]
+        or deployment.code_configuration.code.startswith(ARM_ID_PREFIX)  # type: ignore[union-attr]
     )
 
 
 def _get_local_code_configuration_artifacts(
     deployment: OnlineDeployment,
 ) -> Path:
-    return Path(deployment._base_path, deployment.code_configuration.code).resolve()
+    return Path(
+        deployment._base_path, deployment.code_configuration.code  # type: ignore[union-attr, arg-type]
+    ).resolve()
 
 
 def _get_cloud_code_configuration_artifacts(code: str, code_operations: CodeOperations, download_path: str) -> str:
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py
index 396a302fc756..8660031df939 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py
@@ -76,7 +76,7 @@ def __init__(
         type: Optional[str] = None,  # pylint: disable=redefined-builtin
         path: Optional[Union[str, PathLike]] = None,
         utc_time_created: Optional[str] = None,
-        flavors: Optional[Dict] = None,
+        flavors: Optional[Dict[str, Dict[str, Any]]] = None,
         description: Optional[str] = None,
         tags: Optional[Dict] = None,
         properties: Optional[Dict] = None,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/environment.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/environment.py
index fd04cc56eb6e..6f362b9d9572 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/environment.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/environment.py
@@ -124,7 +124,7 @@ def __init__(
         **kwargs: Any,
     ):
         self._arm_type: str = ""
-        self.latest_version: str = ""
+        self.latest_version: str = ""  # type: ignore[assignment]
        self.image: Optional[str] = None
         inference_config = kwargs.pop("inference_config", None)
         os_type = kwargs.pop("os_type", None)
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command.py
index 4142ac9d91f8..f6ee6715e941 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/command.py
@@ -1,9 +1,7 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-
 # pylint: disable=protected-access
-
 import copy
 import logging
 import os
@@ -170,7 +168,9 @@ def __init__(
         environment: Optional[Union[Environment, str]] = None,
         environment_variables: Optional[Dict] = None,
         resources: Optional[JobResourceConfiguration] = None,
-        services: Optional[Dict] = None,
+        services: Optional[
+            Dict[str, Union[JobService, JupyterLabJobService, SshJobService, TensorBoardJobService, VsCodeJobService]]
+        ] = None,
         queue_settings: Optional[QueueSettings] = None,
         **kwargs: Any,
     ) -> None:
@@ -205,7 +205,7 @@ def __init__(
         self.queue_settings = queue_settings
 
         if isinstance(self.component, CommandComponent):
-            self.resources = self.resources or self.component.resources
+            self.resources = self.resources or self.component.resources  # type: ignore[assignment]
             self.distribution = self.distribution or self.component.distribution
 
         self._swept: bool = False
@@ -277,12 +277,12 @@ def distribution(
         self._distribution = value
 
     @property
-    def resources(self) -> Any:
+    def resources(self) -> JobResourceConfiguration:
         """The compute resource configuration for the command component or job.
 
         :rtype: ~azure.ai.ml.entities.JobResourceConfiguration
         """
-        return self._resources
+        return cast(JobResourceConfiguration, self._resources)
 
     @resources.setter
     def resources(self, value: Union[Dict, JobResourceConfiguration]) -> None:
@@ -381,10 +381,10 @@ def services(
             ~azure.ai.ml.entities.SshJobService, ~azure.ai.ml.entities.TensorBoardJobService,
             ~azure.ai.ml.entities.VsCodeJobService]]
         """
-        self._services = _resolve_job_services(value)
+        self._services = _resolve_job_services(value)  # type: ignore[assignment]
 
     @property
-    def component(self) -> Any:
+    def component(self) -> Union[str, CommandComponent]:
         """The ID or instance of the command component or job to be run for the step.
 
         :return: The ID or instance of the command component or job to be run for the step.
@@ -855,7 +855,9 @@ def _load_from_rest_job(cls, obj: JobBase) -> "Command":
             outputs=from_rest_data_outputs(rest_command_job.outputs),
         )
         command_job._id = obj.id
-        command_job.resources = JobResourceConfiguration._from_rest_object(rest_command_job.resources)
+        command_job.resources = cast(
+            JobResourceConfiguration, JobResourceConfiguration._from_rest_object(rest_command_job.resources)
+        )
         command_job.limits = CommandJobLimits._from_rest_object(rest_command_job.limits)
         command_job.queue_settings = QueueSettings._from_rest_object(rest_command_job.queue_settings)
         if isinstance(command_job.component, CommandComponent):
@@ -939,7 +941,7 @@ def __call__(self, *args: Any, **kwargs: Any) -> "Command":
 
 
 @overload
-def _resolve_job_services(services: None) -> None:
+def _resolve_job_services(services: Optional[Dict]):
     ...
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/parallel.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/parallel.py
index 62f54a8df826..74ae6e9ae2cc 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/parallel.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/parallel.py
@@ -110,7 +110,7 @@ def __init__(
         compute: Optional[str] = None,
         inputs: Optional[Dict] = None,
         outputs: Optional[Dict[str, Union[str, Output, "Output"]]] = None,
-        retry_settings: Optional[Union[RetrySettings, Dict]] = None,
+        retry_settings: Optional[Union[RetrySettings, Dict[str, str]]] = None,
         logging_level: Optional[str] = None,
         max_concurrency_per_instance: Optional[int] = None,
         error_threshold: Optional[int] = None,
@@ -132,7 +132,8 @@ def __init__(
 
         if isinstance(component, FlowComponent):
             # make input definition fit actual inputs for flow component
-            with component._inputs._fit_inputs(inputs):  # pylint: disable=protected-access
+            # pylint: disable=protected-access
+            with component._inputs._fit_inputs(inputs):  # type: ignore[attr-defined]
                 BaseNode.__init__(
                     self,
                     type=NodeType.PARALLEL,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/parallel_func.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/parallel_func.py
index 7098c598e2b6..8b3880b9610a 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/parallel_func.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/parallel_func.py
@@ -11,7 +11,7 @@
     ManagedIdentityConfiguration,
     UserIdentityConfiguration,
 )
-from azure.ai.ml.entities._job.parallel.retry_settings import RetrySettings
+from azure.ai.ml.entities._deployment.deployment_settings import BatchRetrySettings
 from azure.ai.ml.entities._job.parallel.run_function import RunFunction
 
 from .command_func import _parse_input, _parse_inputs_outputs, _parse_output
@@ -27,7 +27,7 @@ def parallel_run_function(
     display_name: Optional[str] = None,
     experiment_name: Optional[str] = None,
     compute: Optional[str] = None,
-    retry_settings: Optional[RetrySettings] = None,
+    retry_settings: Optional[BatchRetrySettings] = None,
     environment_variables: Optional[Dict] = None,
     logging_level: Optional[str] = None,
     max_concurrency_per_instance: Optional[int] = None,
@@ -215,7 +215,7 @@ def parallel_run_function(
             description=description,
             inputs=component_inputs,
             outputs=component_outputs,
-            retry_settings=retry_settings,
+            retry_settings=retry_settings,  # type: ignore[arg-type]
             logging_level=logging_level,
             max_concurrency_per_instance=max_concurrency_per_instance,
             error_threshold=error_threshold,
@@ -238,7 +238,7 @@ def parallel_run_function(
             description=description,
             inputs=component_inputs,
             outputs=component_outputs,
-            retry_settings=retry_settings,
+            retry_settings=retry_settings,  # type: ignore[arg-type]
             logging_level=logging_level,
             max_concurrency_per_instance=max_concurrency_per_instance,
             error_threshold=error_threshold,
@@ -266,7 +266,7 @@ def parallel_run_function(
         outputs=job_outputs,
         identity=identity,
         environment_variables=environment_variables,
-        retry_settings=retry_settings,
+        retry_settings=retry_settings,  # type: ignore[arg-type]
         logging_level=logging_level,
         max_concurrency_per_instance=max_concurrency_per_instance,
         error_threshold=error_threshold,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/sweep.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/sweep.py
index 7d4e97c1c233..e79d95e2f2ca 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/sweep.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_builders/sweep.py
@@ -140,7 +140,14 @@ def __init__(
         early_termination: Optional[
             Union[BanditPolicy, MedianStoppingPolicy, TruncationSelectionPolicy, EarlyTerminationPolicy, str]
         ] = None,
-        search_space: Optional[Dict] = None,
+        search_space: Optional[
+            Dict[
+                str,
+                Union[
+                    Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+                ],
+            ]
+        ] = None,
         inputs: Optional[Dict[str, Union[Input, str, bool, int, float]]] = None,
         outputs: Optional[Dict[str, Union[str, Output]]] = None,
         identity: Optional[
@@ -191,7 +198,12 @@ def trial(self) -> CommandComponent:
     @property
     def search_space(
         self,
-    ) -> Optional[Dict]:
+    ) -> Optional[
+        Dict[
+            str,
+            Union[Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform],
+        ]
+    ]:
         """Dictionary of the hyperparameter search space.
 
         Each key is the name of a hyperparameter and its value is the parameter expression.
@@ -210,7 +222,7 @@ def search_space(self, values: Dict[str, Dict[str, Union[str, int, float, dict]]
         :param values: The search space to set.
         :type values: Dict[str, Dict[str, Union[str, int, float, dict]]]
         """
-        search_space = {}
+        search_space: Dict = {}
         for name, value in values.items():
             # If value is a SearchSpace object, directly pass it to job.search_space[name]
             search_space[name] = self._value_type_to_class(value) if isinstance(value, dict) else value
@@ -341,7 +353,7 @@ def _to_job(self) -> SweepJob:
             sampling_algorithm=self.sampling_algorithm,
             search_space=self.search_space,
             limits=self.limits,
-            early_termination=self.early_termination,
+            early_termination=self.early_termination,  # type: ignore[arg-type]
             objective=self.objective,
             inputs=self._job_inputs,
             outputs=self._job_outputs,
@@ -420,7 +432,7 @@ def __setattr__(self, key: Any, value: Any) -> None:
                 self.early_termination.slack_factor = None
 
     @property
-    def early_termination(self) -> Optional[EarlyTerminationPolicy]:
+    def early_termination(self) -> Optional[Union[str, EarlyTerminationPolicy]]:
         """The early termination policy for the sweep job.
 
         :rtype: Union[str, ~azure.ai.ml.sweep.BanditPolicy, ~azure.ai.ml.sweep.MedianStoppingPolicy,
@@ -429,7 +441,7 @@ def early_termination(self) -> Optional[EarlyTerminationPolicy]:
         return self._early_termination
 
     @early_termination.setter
-    def early_termination(self, value: Optional[EarlyTerminationPolicy]) -> None:
+    def early_termination(self, value: Optional[Union[str, EarlyTerminationPolicy]]) -> None:
         """Sets the early termination policy for the sweep job.
 
         :param value: The early termination policy for the sweep job.
@@ -439,4 +451,4 @@ def early_termination(self, value: Optional[EarlyTerminationPolicy]) -> None:
         if isinstance(value, dict):
             early_termination_schema = EarlyTerminationField()
             value = early_termination_schema._deserialize(value=value, attr=None, data=None)
-        self._early_termination = value
+        self._early_termination = value  # type: ignore[assignment]
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/command_component.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/command_component.py
index 2df6ff140709..b0085cf77598 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/command_component.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/command_component.py
@@ -113,7 +113,7 @@ def __init__(
                 DistributionConfiguration,
             ]
         ] = None,
-        resources: Optional[Union[Dict, JobResourceConfiguration]] = None,
+        resources: Optional[JobResourceConfiguration] = None,
         inputs: Optional[Dict] = None,
         outputs: Optional[Dict] = None,
         instance_count: Optional[int] = None,  # promoted property from resources.instance_count
@@ -150,7 +150,7 @@ def __init__(
         self.code = code
         self.environment_variables = environment_variables
         self.environment = environment
-        self.resources = resources
+        self.resources = resources  # type: ignore[assignment]
         self.distribution = distribution
 
         # check mutual exclusivity of promoted properties
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component.py
index 8654103af6d2..5d6353e73292 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/component.py
@@ -101,7 +101,7 @@ def __init__(
         tags: Optional[Dict] = None,
         properties: Optional[Dict] = None,
         display_name: Optional[str] = None,
-        is_deterministic: Optional[bool] = True,
+        is_deterministic: bool = True,
         inputs: Optional[Dict] = None,
         outputs: Optional[Dict] = None,
         yaml_str: Optional[str] = None,
@@ -423,7 +423,7 @@ def _from_container_rest_object(cls, component_container_rest_object: ComponentC
             properties=component_container_details.properties,
             type=NodeType._CONTAINER,
             # Set this field to None as it hold a default True in init.
-            is_deterministic=None,
+            is_deterministic=None,  # type: ignore[arg-type]
         )
         component.latest_version = component_container_details.latest_version
         return component
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/flow.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/flow.py
index 8922993371ad..86d2629cacab 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/flow.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/flow.py
@@ -409,7 +409,7 @@ def _to_rest_object(self) -> ComponentVersion:
     def _func(self, **kwargs: Any) -> "Parallel":  # pylint: disable=invalid-overridden-method
         from azure.ai.ml.entities._builders.parallel import Parallel
 
-        with self._inputs._fit_inputs(kwargs):  # pylint: disable=protected-access
+        with self._inputs._fit_inputs(kwargs):  # type: ignore[attr-defined]
             # pylint: disable=not-callable
             return super()._func(**kwargs)  # type: ignore
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/pipeline_component.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/pipeline_component.py
index e9c1501bdae1..a6db0a14043c 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/pipeline_component.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/pipeline_component.py
@@ -81,7 +81,7 @@ def __init__(
             display_name=display_name,
             inputs=inputs,
             outputs=outputs,
-            is_deterministic=is_deterministic,
+            is_deterministic=is_deterministic,  # type: ignore[arg-type]
             **kwargs,
         )
         self._jobs = self._process_jobs(jobs) if jobs else {}
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_compute/_setup_scripts.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_compute/_setup_scripts.py
index 169f1f8a26c8..d2e12fd4032f 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_compute/_setup_scripts.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_compute/_setup_scripts.py
@@ -4,7 +4,7 @@
 # pylint: disable=protected-access
 
 import re
-from typing import Optional
+from typing import Optional, cast
 
 from azure.ai.ml._restclient.v2022_10_01_preview.models import ScriptReference as RestScriptReference
 from azure.ai.ml._restclient.v2022_10_01_preview.models import ScriptsToExecute as RestScriptsToExecute
@@ -24,7 +24,7 @@ class ScriptReference(RestTranslatableMixin):
     """
 
     def __init__(
-        self, *, path: Optional[str] = None, command: Optional[str] = None, timeout_minutes: Optional[str] = None
+        self, *, path: Optional[str] = None, command: Optional[str] = None, timeout_minutes: Optional[int] = None
     ) -> None:
         self.path = path
         self.command = command
@@ -47,7 +47,7 @@ def _from_rest_object(cls, obj: RestScriptReference) -> Optional["ScriptReferenc
         script_reference = ScriptReference(
             path=obj.script_data if obj.script_data else None,
             command=obj.script_arguments if obj.script_arguments else None,
-            timeout_minutes=timeout_minutes,
+            timeout_minutes=cast(Optional[int], timeout_minutes),
         )
         return script_reference
 
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/adls_gen1.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/adls_gen1.py
index 722091b365e8..6c863af14743 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/adls_gen1.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/adls_gen1.py
@@ -5,7 +5,7 @@
 # pylint: disable=protected-access,no-member
 
 from pathlib import Path
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional, Union
 
 from azure.ai.ml._restclient.v2023_04_01_preview.models import (
     AzureDataLakeGen1Datastore as RestAzureDatalakeGen1Datastore,
 )
@@ -14,6 +14,7 @@
 from azure.ai.ml._restclient.v2023_04_01_preview.models import DatastoreType
 from azure.ai.ml._schema._datastore.adls_gen1 import AzureDataLakeGen1Schema
 from azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY, TYPE
+from azure.ai.ml.entities._credentials import CertificateConfiguration, ServicePrincipalConfiguration
 from azure.ai.ml.entities._datastore.datastore import Datastore
 from azure.ai.ml.entities._datastore.utils import from_rest_datastore_credentials
 from azure.ai.ml.entities._util import load_from_dict
@@ -46,7 +47,7 @@ def __init__(
         description: Optional[str] = None,
         tags: Optional[Dict] = None,
         properties: Optional[Dict] = None,
-        credentials: Any = None,
+        credentials: Optional[Union[CertificateConfiguration, ServicePrincipalConfiguration]] = None,
         **kwargs: Any
     ):
         kwargs[TYPE] = DatastoreType.AZURE_DATA_LAKE_GEN1
@@ -81,7 +82,7 @@ def _from_rest_object(cls, datastore_resource: DatastoreData) -> "AzureDataLakeG
             id=datastore_resource.id,
             name=datastore_resource.name,
             store_name=properties.store_name,
-            credentials=from_rest_datastore_credentials(properties.credentials),
+            credentials=from_rest_datastore_credentials(properties.credentials),  # type: ignore[arg-type]
             description=properties.description,
             tags=properties.tags,
         )
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/azure_storage.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/azure_storage.py
index 6bfce4e09db9..7eed6e3e3f80 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/azure_storage.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/azure_storage.py
@@ -5,7 +5,7 @@
 # pylint: disable=protected-access,no-member
 
 from pathlib import Path
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional, Union
 
 from azure.ai.ml._azure_environments import _get_storage_endpoint_from_metadata
 from azure.ai.ml._restclient.v2023_04_01_preview.models import AzureBlobDatastore as RestAzureBlobDatastore
@@ -17,6 +17,7 @@
 from azure.ai.ml._restclient.v2023_04_01_preview.models import DatastoreType
 from azure.ai.ml._schema._datastore import AzureBlobSchema, AzureDataLakeGen2Schema, AzureFileSchema
 from azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY, TYPE
+from azure.ai.ml.entities._credentials import AccountKeyConfiguration, SasTokenConfiguration
 from azure.ai.ml.entities._datastore.datastore import Datastore
 from azure.ai.ml.entities._datastore.utils import from_rest_datastore_credentials
 from azure.ai.ml.entities._util import load_from_dict
@@ -159,7 +160,7 @@ def __init__(
         endpoint: Optional[str] = None,
         protocol: str = HTTPS,
         properties: Optional[Dict] = None,
-        credentials: Any = None,
+        credentials: Optional[Union[AccountKeyConfiguration, SasTokenConfiguration]] = None,
         **kwargs: Any
     ):
         kwargs[TYPE] = DatastoreType.AZURE_BLOB
@@ -196,7 +197,7 @@ def _from_rest_object(cls, datastore_resource: DatastoreData) -> "AzureBlobDatas
             name=datastore_resource.name,
             id=datastore_resource.id,
             account_name=properties.account_name,
-            credentials=from_rest_datastore_credentials(properties.credentials),
+            credentials=from_rest_datastore_credentials(properties.credentials),  # type: ignore[arg-type]
             endpoint=properties.endpoint,
             protocol=properties.protocol,
             container_name=properties.container_name,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/one_lake.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/one_lake.py
index 65cc9e2990dd..469a0594166f 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/one_lake.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_datastore/one_lake.py
@@ -6,7 +6,7 @@
 
 from abc import ABC
 from pathlib import Path
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional, Union
 
 from azure.ai.ml._restclient.v2023_04_01_preview.models import Datastore as DatastoreData
 from azure.ai.ml._restclient.v2023_04_01_preview.models import DatastoreType
@@ -16,6 +16,7 @@
 from azure.ai.ml._schema._datastore.one_lake import OneLakeSchema
 from azure.ai.ml._utils._experimental import experimental
 from azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY, TYPE
+from azure.ai.ml.entities._credentials import NoneCredentialConfiguration, ServicePrincipalConfiguration
 from azure.ai.ml.entities._datastore.datastore import Datastore
 from azure.ai.ml.entities._datastore.utils import from_rest_datastore_credentials
 from azure.ai.ml.entities._mixins import DictMixin, RestTranslatableMixin
@@ -88,7 +89,7 @@ def __init__(
         description: Optional[str] = None,
         tags: Optional[Dict] = None,
         properties: Optional[Dict] = None,
-        credentials: Any = None,
+        credentials: Optional[Union[NoneCredentialConfiguration, ServicePrincipalConfiguration]] = None,
         **kwargs: Any
     ):
         kwargs[TYPE] = DatastoreType.ONE_LAKE
@@ -126,7 +127,7 @@ def _from_rest_object(cls, datastore_resource: DatastoreData) -> "OneLakeDatasto
             artifact=LakeHouseArtifact(name=properties.artifact.artifact_name),
             one_lake_workspace_name=properties.one_lake_workspace_name,
             endpoint=properties.endpoint,
-            credentials=from_rest_datastore_credentials(properties.credentials),
+            credentials=from_rest_datastore_credentials(properties.credentials),  # type: ignore[arg-type]
             description=properties.description,
             tags=properties.tags,
         )
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_deployment/deployment.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_deployment/deployment.py
index 1326229eef21..380ed98b0345 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_deployment/deployment.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_deployment/deployment.py
@@ -72,7 +72,7 @@ def __init__(
         tags: Optional[Dict[str, Any]] = None,
         properties: Optional[Dict[str, Any]] = None,
         model: Optional[Union[str, "Model"]] = None,
-        code_configuration: Any = None,
+        code_configuration: Optional[CodeConfiguration] = None,
         environment: Optional[Union[str, "Environment"]] = None,
         environment_variables: Optional[Dict[str, str]] = None,
         code_path: Optional[Union[str, PathLike]] = None,
@@ -174,7 +174,7 @@ def scoring_script(self, value: Union[str, PathLike]) -> None:
         if not self.code_configuration:
             self.code_configuration = CodeConfiguration()
 
-        self.code_configuration.scoring_script = value
+        self.code_configuration.scoring_script = value  # type: ignore[misc]
 
     def dump(self, dest: Union[str, PathLike, IO[AnyStr]], **kwargs: Any) -> None:
         """Dump the deployment content into a file in yaml format.
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_endpoint/online_endpoint.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_endpoint/online_endpoint.py
index f44543c02165..fad915fcd505 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_endpoint/online_endpoint.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_endpoint/online_endpoint.py
@@ -404,7 +404,7 @@ def dump(
         self,
         dest: Optional[Union[str, PathLike, IO[AnyStr]]] = None,  # pylint: disable=unused-argument
         **kwargs: Any,  # pylint: disable=unused-argument
-    ) -> Dict:
+    ) -> Dict[str, Any]:
         context = {BASE_PATH_CONTEXT_KEY: Path(".").parent}
         res: dict = KubernetesOnlineEndpointSchema(context=context).dump(self)
         return res
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs/input.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs/input.py
index 0ed8270c8184..96a2fe233512 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs/input.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs/input.py
@@ -118,10 +118,10 @@ def __init__(
         :paramtype default: Union[str, int, float, bool]
         :keyword min: The minimum value for the input. If a value smaller than the minimum is passed to the job,
             the job execution will fail.
-        :paramtype min: Union[int, float]
+        :paramtype min: Optional[float]
         :keyword max: The maximum value for the input. If a value larger than the maximum is passed to a job,
             the job execution will fail.
-        :paramtype max: Union[int, float]
+        :paramtype max: Optional[float]
         :keyword optional: Specifies if the input is optional.
         :paramtype optional: bool
         :keyword description: Description of the input
@@ -150,10 +150,10 @@ def __init__(
         :paramtype default: Union[str, int, float, bool]
         :keyword min: The minimum value for the input. If a value smaller than the minimum is passed to the job,
             the job execution will fail.
-        :paramtype min: Union[int, float]
+        :paramtype min: Optional[int]
         :keyword max: The maximum value for the input. If a value larger than the maximum is passed to a job,
             the job execution will fail.
-        :paramtype max: Union[int, float]
+        :paramtype max: Optional[int]
         :keyword optional: Specifies if the input is optional.
         :paramtype optional: bool
         :keyword description: Description of the input
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs/output.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs/output.py
index fc6a3691cac9..bc245126d20a 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs/output.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_inputs_outputs/output.py
@@ -23,7 +23,8 @@ class Output(_InputOutputBase):
     @overload
     def __init__(
         self,
-        type: Any,
+        *,
+        type: str,
         path: Optional[str] = None,
         mode: Optional[str] = None,
         description: Optional[str] = None,
@@ -34,6 +35,7 @@ def __init__(
     @overload
     def __init__(
         self,
+        *,
         type: Literal["uri_folder"] = "uri_folder",
         path: Optional[str] = None,
         mode: Optional[str] = None,
@@ -83,6 +85,7 @@ def __init__(
     @overload
     def __init__(
         self,
+        *,
         type: Literal["uri_file"] = "uri_file",
         path: Optional[str] = None,
         mode: Optional[str] = None,
@@ -111,6 +114,7 @@ def __init__(
 
     def __init__(
         self,
+        *,
         type: str = AssetTypes.URI_FOLDER,
         path: Optional[str] = None,
         mode: Optional[str] = None,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/nlp/nlp_search_space.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/nlp/nlp_search_space.py
index a4b087cdbd85..e4ad435fe57e 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/nlp/nlp_search_space.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/nlp/nlp_search_space.py
@@ -57,15 +57,15 @@ class NlpSearchSpace(RestTranslatableMixin):
     def __init__(
         self,
         *,
-        gradient_accumulation_steps: Optional[Union[bool, int, float, str, SweepDistribution]] = None,
-        learning_rate: Optional[Union[bool, int, float, str, SweepDistribution]] = None,
-        learning_rate_scheduler: Optional[Union[bool, int, float, str, SweepDistribution]] = None,
-        model_name: Optional[Union[bool, int, float, str, SweepDistribution]] = None,
-        number_of_epochs: Optional[Union[bool, int, float, str, SweepDistribution]] = None,
-        training_batch_size: Optional[Union[bool, int, float, str, SweepDistribution]] = None,
-        validation_batch_size: Optional[Union[bool, int, float, str, SweepDistribution]] = None,
-        warmup_ratio: Optional[Union[bool, int, float, str, SweepDistribution]] = None,
-        weight_decay: Optional[Union[bool, int, float, str, SweepDistribution]] = None
+        gradient_accumulation_steps: Optional[Union[int, SweepDistribution]] = None,
+        learning_rate: Optional[Union[float, SweepDistribution]] = None,
+        learning_rate_scheduler: Optional[Union[str, SweepDistribution]] = None,
+        model_name: Optional[Union[str, SweepDistribution]] = None,
+        number_of_epochs: Optional[Union[int, SweepDistribution]] = None,
+        training_batch_size: Optional[Union[int, SweepDistribution]] = None,
+        validation_batch_size: Optional[Union[int, SweepDistribution]] = None,
+        warmup_ratio: Optional[Union[float, SweepDistribution]] = None,
+        weight_decay: Optional[Union[float, SweepDistribution]] = None
     ):
         # Since we want customers to be able to specify enums as well rather than just strings, we need to access
         # the enum values here before we serialize them ('NlpModels.BERT_BASE_CASED' vs. 'bert-base-cased').
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/tabular/featurization_settings.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/tabular/featurization_settings.py
index 2666e002f5c4..6ef2332ee370 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/tabular/featurization_settings.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/tabular/featurization_settings.py
@@ -67,7 +67,7 @@ def __init__(
         blocked_transformers: Optional[List[Union[BlockedTransformers, str]]] = None,
         column_name_and_types: Optional[Dict[str, str]] = None,
         dataset_language: Optional[str] = None,
-        transformer_params: Optional[Dict] = None,
+        transformer_params: Optional[Dict[str, List[ColumnTransformer]]] = None,
         mode: Optional[str] = None,
         enable_dnn_featurization: Optional[bool] = None,
     ):
@@ -138,7 +138,7 @@ def _to_rest_object(self) -> RestTabularFeaturizationSettings:
     @classmethod
     def _from_rest_object(cls, obj: RestTabularFeaturizationSettings) -> "TabularFeaturizationSettings":
         rest_transformers_params = obj.transformer_params
-        transformer_dict = None
+        transformer_dict: Optional[Dict] = None
         if rest_transformers_params:
             transformer_dict = {}
             for key, settings in rest_transformers_params.items():
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/command_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/command_job.py
index 280a2cb5dcf6..d06ab15b41e8 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/command_job.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/command_job.py
@@ -22,7 +22,7 @@
     UserIdentityConfiguration,
     _BaseJobIdentityConfiguration,
 )
-from azure.ai.ml.entities._inputs_outputs import Input
+from azure.ai.ml.entities._inputs_outputs import Input, Output
 from azure.ai.ml.entities._job._input_output_helpers import (
     from_rest_data_outputs,
     from_rest_inputs_to_dataset_literal,
@@ -31,7 +31,14 @@
     validate_inputs_for_command,
 )
 from azure.ai.ml.entities._job.distribution import DistributionConfiguration
-from azure.ai.ml.entities._job.job_service import JobServiceBase
+from azure.ai.ml.entities._job.job_service import (
+    JobService,
+    JobServiceBase,
+    JupyterLabJobService,
+    SshJobService,
+    TensorBoardJobService,
+    VsCodeJobService,
+)
 from azure.ai.ml.entities._system_data import SystemData
 from azure.ai.ml.entities._util import load_from_dict
 from azure.ai.ml.exceptions import ErrorCategory, ErrorTarget, ValidationErrorType, ValidationException
@@ -82,12 +89,14 @@ def __init__(
         self,
         *,
         inputs: Optional[Dict[str, Union[Input, str, bool, int, float]]] = None,
-        outputs: Optional[Dict] = None,
+        outputs: Optional[Dict[str, Output]] = None,
         limits: Optional[CommandJobLimits] = None,
         identity: Optional[
             Union[Dict, ManagedIdentityConfiguration, AmlTokenConfiguration, UserIdentityConfiguration]
         ] = None,
-        services: Optional[Dict] = None,
+        services: Optional[
+            Dict[str, Union[JobService, JupyterLabJobService, SshJobService, TensorBoardJobService, VsCodeJobService]]
+        ] = None,
         **kwargs: Any,
     ) -> None:
         kwargs[TYPE] = JobType.COMMAND
@@ -95,8 +104,8 @@ def __init__(
 
         super().__init__(**kwargs)
 
-        self.outputs = outputs
-        self.inputs = inputs
+        self.outputs = outputs  # type: ignore[assignment]
+        self.inputs = inputs  # type: ignore[assignment]
         self.limits = limits
         self.identity = identity
         self.services = services
@@ -255,8 +264,8 @@ def _to_node(self, context: Optional[Dict] = None, **kwargs: Any) -> "Command":
             component=component,
             compute=self.compute,
             # Need to supply the inputs with double curly.
-            inputs=self.inputs,
-            outputs=self.outputs,
+            inputs=self.inputs,  # type: ignore[arg-type]
+            outputs=self.outputs,  # type: ignore[arg-type]
             environment_variables=self.environment_variables,
             description=self.description,
             tags=self.tags,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/data_transfer/data_transfer_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/data_transfer/data_transfer_job.py
index d004537ef57d..4ed816d5cf9e 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/data_transfer/data_transfer_job.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/data_transfer/data_transfer_job.py
@@ -17,6 +17,7 @@
 from azure.ai.ml.constants import JobType
 from azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY, TYPE
 from azure.ai.ml.constants._component import DataTransferBuiltinComponentUri, DataTransferTaskType, ExternalDataType
+from azure.ai.ml.entities._inputs_outputs import Input, Output
 from azure.ai.ml.entities._inputs_outputs.external_data import Database, FileSystem
 from azure.ai.ml.entities._util import load_from_dict
 from azure.ai.ml.exceptions import ErrorCategory, ErrorTarget, ValidationErrorType, ValidationException
@@ -152,16 +153,16 @@ class DataTransferCopyJob(DataTransferJob):
     def __init__(
         self,
         *,
-        inputs: Optional[Dict] = None,
-        outputs: Optional[Dict] = None,
+        inputs: Optional[Dict[str, Union[Input, str]]] = None,
+        outputs: Optional[Dict[str, Union[Output]]] = None,
         data_copy_mode: Optional[str] = None,
         **kwargs: Any,
     ):
         kwargs["task"] = DataTransferTaskType.COPY_DATA
         super().__init__(**kwargs)
 
-        self.outputs = outputs
-        self.inputs = inputs
+        self.outputs = outputs  # type: ignore[assignment]
+        self.inputs = inputs  # type: ignore[assignment]
         self.data_copy_mode = data_copy_mode
 
     def _to_dict(self) -> Dict:
@@ -217,8 +218,8 @@ def _to_node(self, context: Optional[Dict] = None, **kwargs: Any) -> "DataTransf
             component=component,
             compute=self.compute,
             # Need to supply the inputs with double curly.
-            inputs=self.inputs,
-            outputs=self.outputs,
+            inputs=self.inputs,  # type: ignore[arg-type]
+            outputs=self.outputs,  # type: ignore[arg-type]
             description=self.description,
             tags=self.tags,
             display_name=self.display_name,
@@ -229,14 +230,14 @@ class DataTransferImportJob(DataTransferJob):
     def __init__(
         self,
         *,
-        outputs: Optional[Dict] = None,
+        outputs: Optional[Dict[str, Union[Output]]] = None,
         source: Optional[Union[Dict, Database, FileSystem]] = None,
         **kwargs: Any,
     ):
         kwargs["task"] = DataTransferTaskType.IMPORT_DATA
         super().__init__(**kwargs)
 
-        self.outputs = outputs
+        self.outputs = outputs  # type: ignore[assignment]
         self.source = self._build_source_sink(source)
 
     def _to_dict(self) -> Dict:
@@ -285,7 +286,7 @@ def _to_node(self, context: Optional[Dict] = None, **kwargs: Any) -> "DataTransf
             component=component,
             compute=self.compute,
             source=self.source,
-            outputs=self.outputs,
+            outputs=self.outputs,  # type: ignore[arg-type]
             description=self.description,
             tags=self.tags,
             display_name=self.display_name,
@@ -297,14 +298,14 @@ class DataTransferExportJob(DataTransferJob):
     def __init__(
         self,
         *,
-        inputs: Optional[Dict] = None,
+        inputs: Optional[Dict[str, Union[Input]]] = None,
         sink: Optional[Union[Dict, Database, FileSystem]] = None,
         **kwargs: Any,
     ):
         kwargs["task"] = DataTransferTaskType.EXPORT_DATA
         super().__init__(**kwargs)
 
-        self.inputs = inputs
+        self.inputs = inputs  # type: ignore[assignment]
         self.sink = self._build_source_sink(sink)
 
     def _to_dict(self) -> Dict:
@@ -357,7 +358,7 @@ def _to_node(self, context: Optional[Dict] = None, **kwargs: Any) -> "DataTransf
             component=component,
             compute=self.compute,
             sink=self.sink,
-            inputs=self.inputs,
+            inputs=self.inputs,  # type: ignore[arg-type]
             description=self.description,
             tags=self.tags,
             display_name=self.display_name,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/job_io_mixin.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/job_io_mixin.py
index d466cd300538..21db73bac298 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/job_io_mixin.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/job_io_mixin.py
@@ -3,19 +3,20 @@
 # ---------------------------------------------------------
 
 
-from typing import Dict, Optional
+from typing import Dict, Union
 
+from azure.ai.ml.entities._inputs_outputs import Input, Output
 from azure.ai.ml.entities._job._input_output_helpers import build_input_output
 
 
 class JobIOMixin:
     @property
-    def inputs(self) -> Optional[Dict]:
+    def inputs(self) -> Dict[str, Union[Input, str, bool, int, float]]:
         return self._inputs
 
     @inputs.setter
-    def inputs(self, value: Dict) -> None:
-        self._inputs = {}
+    def inputs(self, value: Dict[str, Union[Input, str, bool, int, float]]) -> None:
+        self._inputs: Dict = {}
         if not value:
             return
 
@@ -23,12 +24,12 @@ def inputs(self, value: Dict) -> None:
             self._inputs[input_name] = build_input_output(input_value)
 
     @property
-    def outputs(self) -> Optional[Dict]:
+    def outputs(self) -> Dict[str, Output]:
         return self._outputs
 
     @outputs.setter
-    def outputs(self, value: Dict) -> None:
-        self._outputs = {}
+    def outputs(self, value: Dict[str, Output]) -> None:
+        self._outputs: Dict = {}
         if not value:
             return
 
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/job_resource_configuration.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/job_resource_configuration.py
index 6e3fe734bfee..a5d0ffe07db9 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/job_resource_configuration.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/job_resource_configuration.py
@@ -124,7 +124,7 @@ def __init__(
         locations: Optional[List[str]] = None,
         instance_count: Optional[int] = None,
         instance_type: Optional[Union[str, List]] = None,
-        properties: Optional[Properties] = None,
+        properties: Optional[Union[Properties, Dict]] = None,
         docker_args: Optional[str] = None,
         shm_size: Optional[str] = None,
         max_instance_count: Optional[int] = None,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/parallel/parallel_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/parallel/parallel_job.py
index 28752fd20b6c..9310f65ab0db 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/parallel/parallel_job.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/parallel/parallel_job.py
@@ -92,8 +92,8 @@ def __init__(
 
         super().__init__(**kwargs)
 
-        self.inputs = inputs
-        self.outputs = outputs
+        self.inputs = inputs  # type: ignore[assignment]
+        self.outputs = outputs  # type: ignore[assignment]
         self.identity = identity
 
     def _to_dict(self) -> Dict:
@@ -201,7 +201,7 @@ def _to_node(self, context: Optional[Dict] = None, **kwargs: Any) -> "Parallel":
             compute=self.compute,
             # Need to supply the inputs with double curly.
             inputs=self.inputs,
-            outputs=self.outputs,
+            outputs=self.outputs,  # type: ignore[arg-type]
             mini_batch_size=self.mini_batch_size,
             partition_keys=self.partition_keys,
             input_data=self.input_data,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/parameterized_command.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/parameterized_command.py
index 46f211189601..57604b382e44 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/parameterized_command.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/parameterized_command.py
@@ -92,7 +92,7 @@ def __init__(
         self.environment_variables = dict(environment_variables) if environment_variables else {}
         self.environment = environment
         self.distribution = distribution
-        self.resources = resources
+        self.resources = resources  # type: ignore[assignment]
         self.queue_settings = queue_settings
 
     @property
@@ -137,7 +137,7 @@ def distribution(self, value: Union[dict, PyTorchDistribution, MpiDistribution])
         self._distribution = value
 
     @property
-    def resources(self) -> Optional[Union[dict, JobResourceConfiguration]]:
+    def resources(self) -> JobResourceConfiguration:
         """The compute resource configuration for the command component or job.
 
         :return: The compute resource configuration for the command component or job.
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_io/base.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_io/base.py
index a846040bd511..a63c98bee612 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_io/base.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/pipeline/_io/base.py
@@ -620,7 +620,7 @@ def _build_default_data(self) -> None:
         if self._data is None:
             # _meta will be None when node._component is not a Component object
             # so we just leave the type inference work to backend
-            self._data = Output(type=None)
+            self._data = Output(type=None)  # type: ignore[call-overload]
 
     def _build_data(self, data: T) -> Any:
         """Build output data according to assigned input, eg: node.outputs.key = data
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/spark_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/spark_job.py
index 98b08046337e..e49054299a37 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/spark_job.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/spark_job.py
@@ -24,6 +24,7 @@
     UserIdentityConfiguration,
     _BaseJobIdentityConfiguration,
 )
+from azure.ai.ml.entities._inputs_outputs import Input, Output
 from azure.ai.ml.entities._job._input_output_helpers import (
     from_rest_data_outputs,
     from_rest_inputs_to_dataset_literal,
@@ -105,8 +106,8 @@ def __init__(
         dynamic_allocation_enabled: Optional[Union[bool, str]] = None,
         dynamic_allocation_min_executors: Optional[Union[int, str]] = None,
         dynamic_allocation_max_executors: Optional[Union[int, str]] = None,
-        inputs: Optional[Dict] = None,
-        outputs: Optional[Dict] = None,
+        inputs: Optional[Dict[str, Union[Input, str, bool, int, float]]] = None,
+        outputs: Optional[Dict[str, Output]] = None,
         compute: Optional[str] = None,
         identity: Optional[
             Union[Dict[str, str], ManagedIdentityConfiguration, AmlTokenConfiguration, UserIdentityConfiguration]
@@ -127,8 +128,8 @@ def __init__(
         self.dynamic_allocation_enabled = dynamic_allocation_enabled
         self.dynamic_allocation_min_executors = dynamic_allocation_min_executors
         self.dynamic_allocation_max_executors = dynamic_allocation_max_executors
-        self.inputs = inputs
-        self.outputs = outputs
+        self.inputs = inputs  # type: ignore[assignment]
+        self.outputs = outputs  # type: ignore[assignment]
         self.compute = compute
         self.resources = resources
         self.identity = identity
@@ -373,8 +374,8 @@ def _to_node(self, context: Optional[Dict] = None, **kwargs: Any) -> "Spark":
             dynamic_allocation_min_executors=self.dynamic_allocation_min_executors,
             dynamic_allocation_max_executors=self.dynamic_allocation_max_executors,
             conf=self.conf,
-            inputs=self.inputs,
-            outputs=self.outputs,
+            inputs=self.inputs,  # type: ignore[arg-type]
+            outputs=self.outputs,  # type: ignore[arg-type]
             compute=self.compute,
             resources=self.resources,
             properties=self.properties_sparkJob,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/sweep/parameterized_sweep.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/sweep/parameterized_sweep.py
index 5c1b4b38bbc2..9bfab59275e3 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/sweep/parameterized_sweep.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/sweep/parameterized_sweep.py
@@ -76,7 +76,7 @@ def __init__(
         :type resources: ~azure.ai.ml.entities.ResourceConfiguration
         """
         self.sampling_algorithm = sampling_algorithm
-        self.early_termination = early_termination
+        self.early_termination = early_termination  # type: ignore[assignment]
         self._limits = limits
         self.search_space = search_space
         self.queue_settings = queue_settings
@@ -270,7 +270,7 @@ def _get_rest_sampling_algorithm(self) -> RestSamplingAlgorithm:
         )
 
     @property
-    def early_termination(self) -> Any:
+    def early_termination(self) -> Optional[Union[str, EarlyTerminationPolicy]]:
        """Early termination policy for sweep job.

        :returns: Early termination policy for sweep job.
@@ -285,6 +285,7 @@ def early_termination(self, value: Any) -> None:
         :param value: Early termination policy for sweep job.
         :type value: ~azure.ai.ml.entities._job.sweep.early_termination_policy.EarlyTerminationPolicy
         """
+        self._early_termination: Optional[Union[str, EarlyTerminationPolicy]]
         if value is None:
             self._early_termination = None
         elif isinstance(value, EarlyTerminationPolicy):
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/sweep/search_space.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/sweep/search_space.py
index b28b4d057f49..0df06d9539ea 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/sweep/search_space.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/sweep/search_space.py
@@ -76,7 +76,7 @@ class Choice(SweepDistribution):
             :caption: Using Choice distribution to set values for a hyperparameter sweep
     """
 
-    def __init__(self, values: Optional[List] = None, **kwargs: Any) -> None:
+    def __init__(self, values: Optional[List[Union[float, str, dict]]] = None, **kwargs: Any) -> None:
         kwargs.setdefault(TYPE, SearchSpace.CHOICE)
         super().__init__(**kwargs)
         self.values = values
@@ -116,7 +116,7 @@ def _from_rest_object(cls, obj: List) -> "Choice":
                 from_rest_values.append(from_rest_dict)
             else:
                 from_rest_values.append(rest_value)
-        return Choice(values=from_rest_values)
+        return Choice(values=from_rest_values)  # type: ignore[arg-type]
 
 
 class Normal(SweepDistribution):
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/sweep/sweep_job.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/sweep/sweep_job.py
index 129954686839..5adb05c1fa36 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/sweep/sweep_job.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/sweep/sweep_job.py
@@ -51,7 +51,19 @@
 )
 from .objective import Objective
 from .parameterized_sweep import ParameterizedSweep
-from .search_space import SweepDistribution
+from .search_space import (
+    Choice,
+    LogNormal,
+    LogUniform,
+    Normal,
+    QLogNormal,
+    QLogUniform,
+    QNormal,
+    QUniform,
+    Randint,
+    SweepDistribution,
+    Uniform,
+)
 
 module_logger = logging.getLogger(__name__)
 
@@ -141,9 +153,16 @@ def __init__(
         compute: Optional[str] = None,
         limits: Optional[SweepJobLimits] = None,
         sampling_algorithm: Optional[Union[str, SamplingAlgorithm]] = None,
-        search_space: Optional[Dict] = None,
+        search_space: Optional[
+            Dict[
+                str,
+                Union[
+                    Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+                ],
+            ]
+        ] = None,
         objective: Optional[Objective] = None,
-        trial: Optional[Union[CommandJob, CommandComponent, ParameterizedCommand]] = None,
+        trial: Optional[Union[CommandJob, CommandComponent]] = None,
         early_termination: Optional[
             Union[EarlyTerminationPolicy, BanditPolicy, MedianStoppingPolicy, TruncationSelectionPolicy]
         ] = None,
@@ -163,8 +182,8 @@ def __init__(
             compute=compute,
             **kwargs,
         )
-        self.inputs = inputs
-        self.outputs = outputs
+        self.inputs = inputs  # type: ignore[assignment]
+        self.outputs = outputs  # type: ignore[assignment]
         self.trial = trial
         self.identity = identity
 
@@ -267,6 +286,10 @@ def _load_from_rest(cls, obj: JobBase) -> "SweepJob":
 
         # Compute also appears in both layers of the yaml, but only one of the REST.
         # This should be a required field in one place, but cannot be if its optional in two
+        _search_space = {}
+        for param, dist in properties.search_space.items():
+            _search_space[param] = SweepDistribution._from_rest_object(dist)
+
         return SweepJob(
             name=obj.name,
             id=obj.id,
@@ -278,12 +301,10 @@ def _load_from_rest(cls, obj: JobBase) -> "SweepJob":
             services=properties.services,
             status=properties.status,
             creation_context=SystemData._from_rest_object(obj.system_data) if obj.system_data else None,
-            trial=trial,
+            trial=trial,  # type: ignore[arg-type]
             compute=properties.compute_id,
             sampling_algorithm=sampling_algorithm,
-            search_space={
-                param: SweepDistribution._from_rest_object(dist) for (param, dist) in properties.search_space.items()
-            },
+            search_space=_search_space,  # type: ignore[arg-type]
             limits=SweepJobLimits._from_rest_object(properties.limits),
             early_termination=early_termination,
             objective=properties.objective,
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_monitoring/definition.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_monitoring/definition.py
index 8a4521af3ece..75ba3bdb8d6b 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_monitoring/definition.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_monitoring/definition.py
@@ -20,8 +20,11 @@
 from azure.ai.ml.entities._monitoring.alert_notification import AlertNotification
 from azure.ai.ml.entities._monitoring.compute import ServerlessSparkCompute
 from azure.ai.ml.entities._monitoring.signals import (
+    CustomMonitoringSignal,
     DataDriftSignal,
     DataQualitySignal,
+    FeatureAttributionDriftSignal,
+    GenerationSafetyQualitySignal,
     MonitoringSignal,
     PredictionDriftSignal,
 )
@@ -62,7 +65,17 @@ def __init__(
         *,
         compute: ServerlessSparkCompute,
         monitoring_target: Optional[MonitoringTarget] = None,
-        monitoring_signals: Optional[Dict] = None,
+        monitoring_signals: Dict[
+            str,
+            Union[
+                DataDriftSignal,
+                DataQualitySignal,
+                PredictionDriftSignal,
+                FeatureAttributionDriftSignal,
+                CustomMonitoringSignal,
+                GenerationSafetyQualitySignal,
+            ],
+        ] = None,  # type: ignore[assignment]
         alert_notification: Optional[Union[Literal["azmonitoring"], AlertNotification]] = None,
     ) -> None:
         self.compute = compute
@@ -108,6 +121,11 @@ def _from_rest_object(
             from_rest_alert_notification = AZMONITORING
         else:
             from_rest_alert_notification = AlertNotification._from_rest_object(obj.alert_notification_setting)
+
+        _monitoring_signals = {}
+        for signal_name, signal in obj.signals.items():
+            _monitoring_signals[signal_name] = MonitoringSignal._from_rest_object(signal)
+
         return cls(
             compute=ServerlessSparkCompute._from_rest_object(obj.compute_configuration),
             monitoring_target=MonitoringTarget(
@@ -115,9 +133,7 @@ def _from_rest_object(
             )
             if obj.monitoring_target
             else None,
-            monitoring_signals={
-                signal_name: MonitoringSignal._from_rest_object(signal) for signal_name, signal in obj.signals.items()
-            },
+            monitoring_signals=_monitoring_signals,  # type: ignore[arg-type]
             alert_notification=from_rest_alert_notification,
         )
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_registry/registry.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_registry/registry.py
index 864ae5f34f46..5caaf40a15b2 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_registry/registry.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_registry/registry.py
@@ -41,7 +41,7 @@ def __init__(
         intellectual_property: Optional[IntellectualProperty] = None,
         managed_resource_group: Optional[str] = None,
         mlflow_registry_uri: Optional[str] = None,
-        replication_locations: Optional[List],
+        replication_locations: Optional[List[RegistryRegionDetails]],
         **kwargs: Any,
     ):
         """Azure ML registry.
@@ -115,7 +115,7 @@ def _to_dict(self) -> Dict:
         # limited, and would probably just confuse most users.
         if self.replication_locations and len(self.replication_locations) > 0:
             if self.replication_locations[0].acr_config and len(self.replication_locations[0].acr_config) > 0:
-                self.container_registry = self.replication_locations[0].acr_config[0]
+                self.container_registry = self.replication_locations[0].acr_config[0]  # type: ignore[assignment]
 
         res: dict = schema.dump(self)
         return res
@@ -170,7 +170,7 @@ def _from_rest_object(cls, rest_obj: RestRegistry) -> Optional["Registry"]:
             else None,
             managed_resource_group=real_registry.managed_resource_group,
             mlflow_registry_uri=real_registry.ml_flow_registry_uri,
-            replication_locations=replication_locations,
+            replication_locations=replication_locations,  # type: ignore[arg-type]
         )
 
         # There are differences between what our registry validation schema
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_registry/registry_support_classes.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_registry/registry_support_classes.py
index a0db5019d513..c00e86b4635c 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_registry/registry_support_classes.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_registry/registry_support_classes.py
@@ -127,7 +127,7 @@ class RegistryRegionDetails:
     def __init__(
         self,
         *,
-        acr_config: Optional[List] = None,
+        acr_config: Optional[List[Union[str, SystemCreatedAcrAccount]]] = None,
         location: Optional[str] = None,
         storage_config: Optional[Union[List[str], SystemCreatedStorageAccount]] = None,
     ):
@@ -161,7 +161,9 @@ def _from_rest_object(cls, rest_obj: RestRegistryRegionArmDetails) -> Optional["
             storages = cls._storage_config_from_rest_object(rest_obj.storage_account_details)
 
         return RegistryRegionDetails(
-            acr_config=converted_acr_details, location=rest_obj.location, storage_config=storages
+            acr_config=converted_acr_details,  # type: ignore[arg-type]
+            location=rest_obj.location,
+            storage_config=storages,
         )
 
     def _to_rest_object(self) -> RestRegistryRegionArmDetails:
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_system_data.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_system_data.py
index c1e019d8638c..f3a8c2cdf57e 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_system_data.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_system_data.py
@@ -2,11 +2,10 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------- -from datetime import datetime -from typing import Union + +from typing import Any from azure.ai.ml._restclient.v2022_10_01.models import SystemData as RestSystemData -from azure.ai.ml.entities import CreatedByType from azure.ai.ml.entities._mixins import RestTranslatableMixin @@ -43,7 +42,7 @@ class SystemData(RestTranslatableMixin): :paramtype last_modified_at: datetime """ - def __init__(self, **kwargs: Union[str, CreatedByType, datetime]) -> None: + def __init__(self, **kwargs: Any) -> None: self.created_by = kwargs.get("created_by", None) self.created_by_type = kwargs.get("created_by_type", None) self.created_at = kwargs.get("created_at", None) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_workspace/networking.py b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_workspace/networking.py index 5adba75150b4..cf85c570dd45 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_workspace/networking.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/entities/_workspace/networking.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- from abc import ABC -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional from azure.ai.ml._restclient.v2023_08_01_preview.models import FqdnOutboundRule as RestFqdnOutboundRule from azure.ai.ml._restclient.v2023_08_01_preview.models import ( @@ -256,7 +256,7 @@ def __init__( self, *, isolation_mode: str = IsolationMode.DISABLED, - outbound_rules: Optional[Any] = None, + outbound_rules: Optional[List[OutboundRule]] = None, network_id: Optional[str] = None, **kwargs: Any, ) -> None: @@ -268,7 +268,8 @@ def __init__( def _to_rest_object(self) -> RestManagedNetwork: rest_outbound_rules = ( { - outbound_rule.name: outbound_rule._to_rest_object() # pylint: disable=protected-access + # pylint: disable=protected-access + outbound_rule.name: outbound_rule._to_rest_object() # type: ignore[attr-defined] for outbound_rule in self.outbound_rules } if self.outbound_rules @@ -288,7 +289,7 @@ def _from_rest_object(cls, obj: RestManagedNetwork) -> "ManagedNetwork": ) return ManagedNetwork( isolation_mode=obj.isolation_mode, - outbound_rules=from_rest_outbound_rules, + outbound_rules=from_rest_outbound_rules, # type: ignore[arg-type] network_id=obj.network_id, status=obj.status, ) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_feature_set_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_feature_set_operations.py index a84528c24289..98cfc0a94951 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_feature_set_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_feature_set_operations.py @@ -139,7 +139,7 @@ def get(self, name: str, version: str, **kwargs: Dict) -> Optional[FeatureSet]: """ try: featureset_version_resource = self._get(name, version, **kwargs) - return FeatureSet._from_rest_object(featureset_version_resource) + return FeatureSet._from_rest_object(featureset_version_resource) # type: ignore[return-value] except (ValidationException, SchemaValidationError) as ex: log_and_raise_error(ex) @@ -360,7 +360,7 @@ def get_feature( **kwargs, ) - return Feature._from_rest_object(feature) + return Feature._from_rest_object(feature) # type: ignore[return-value] @distributed_trace @monitor_with_activity(ops_logger, "FeatureSet.Archive", ActivityType.PUBLICAPI) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_feature_store_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_feature_store_operations.py index e76794cce543..571f5d83c8e1 
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_feature_store_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_feature_store_operations.py
index e76794cce543..571f5d83c8e1 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_feature_store_operations.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_feature_store_operations.py
@@ -113,7 +113,7 @@ def list(self, *, scope: str = Scope.RESOURCE_GROUP, **kwargs: Dict) -> Iterable
     @distributed_trace
     @monitor_with_activity(ops_logger, "FeatureStore.Get", ActivityType.PUBLICAPI)
     # pylint: disable=arguments-renamed
-    def get(self, name: str, **kwargs: Any) -> Optional[FeatureStore]:
+    def get(self, name: str, **kwargs: Any) -> FeatureStore:
         """Get a feature store by name.
 
         :param name: Name of the feature store.
@@ -122,7 +122,7 @@ def get(self, name: str, **kwargs: Any) -> FeatureStore:
         :rtype: FeatureStore
         """
-        feature_store = None
+        feature_store: Any = None
         resource_group = kwargs.get("resource_group") or self._resource_group_name
         rest_workspace_obj = kwargs.get("rest_workspace_obj", None) or self._operation.get(resource_group, name)
         if rest_workspace_obj and rest_workspace_obj.kind and rest_workspace_obj.kind.lower() == FEATURE_STORE_KIND:
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py
index adde5fa09454..8a3e9e34c810 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py
@@ -30,8 +30,8 @@
 from azure.ai.ml._restclient.v2023_04_01_preview import AzureMachineLearningWorkspaces as ServiceClient022023Preview
 from azure.ai.ml._restclient.v2023_04_01_preview.models import JobBase, ListViewType, UserIdentity
 from azure.ai.ml._restclient.v2023_08_01_preview.models import JobType as RestJobType
-from azure.ai.ml._restclient.v2024_01_01_preview.models import JobType as RestJobType_20240101
 from azure.ai.ml._restclient.v2024_01_01_preview.models import JobBase as JobBase_2401
+from azure.ai.ml._restclient.v2024_01_01_preview.models import JobType as RestJobType_20240101
 from azure.ai.ml._scope_dependent_operations import (
     OperationConfig,
     OperationsContainer,
@@ -72,8 +72,8 @@
 from azure.ai.ml.entities._datastore._constants import WORKSPACE_BLOB_STORE
 from azure.ai.ml.entities._inputs_outputs import Input
 from azure.ai.ml.entities._job.automl.automl_job import AutoMLJob
-from azure.ai.ml.entities._job.finetuning.finetuning_job import FineTuningJob
 from azure.ai.ml.entities._job.base_job import _BaseJob
+from azure.ai.ml.entities._job.finetuning.finetuning_job import FineTuningJob
 from azure.ai.ml.entities._job.import_job import ImportJob
 from azure.ai.ml.entities._job.job import _is_pipeline_child_job
 from azure.ai.ml.entities._job.parallel.parallel_job import ParallelJob
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_local_deployment_helper.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_local_deployment_helper.py
index dea4e0fada0a..d5740e563934 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_local_deployment_helper.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_local_deployment_helper.py
@@ -233,7 +233,7 @@ def _create_deployment(
             endpoint_name=endpoint_name,
             deployment_name=str(deployment_name),
             yaml_code_directory_path=str(code_directory_path),
-            yaml_code_scoring_script_file_name=deployment.code_configuration.scoring_script
+            yaml_code_scoring_script_file_name=deployment.code_configuration.scoring_script  # type: ignore
             if code_directory_path
             else None,
             model_directory_path=model_directory_path,
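Note: most `[union-attr]` suppressions above and below come from `Optional` attributes such as `code_configuration` that surrounding control flow has already proven non-None, which mypy cannot see across attribute accesses. A small sketch of the alternative pattern — binding the attribute to a local after an explicit guard — using simplified stand-in classes rather than the SDK's real definitions:

    from typing import Optional

    class CodeConfiguration:
        def __init__(self, scoring_script: str) -> None:
            self.scoring_script = scoring_script

    class Deployment:
        def __init__(self, code_configuration: Optional[CodeConfiguration]) -> None:
            self.code_configuration = code_configuration

    def scoring_script_name(deployment: Deployment) -> Optional[str]:
        # Binding to a local narrows Optional[CodeConfiguration] to
        # CodeConfiguration, so no "# type: ignore[union-attr]" is needed.
        config = deployment.code_configuration
        if config is None:
            return None
        return config.scoring_script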
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_model_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_model_operations.py
index d24996227fd0..4c7d91775ebe 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_model_operations.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_model_operations.py
@@ -403,10 +403,10 @@ def archive(
 
         :param name: Name of model asset.
         :type name: str
-        :keyword version: Version of model asset.
-        :paramtype version: str
-        :keyword label: Label of the model asset. (mutually exclusive with version)
-        :paramtype label: str
+        :param version: Version of model asset.
+        :type version: str
+        :param label: Label of the model asset. (mutually exclusive with version)
+        :type label: str
 
         .. admonition:: Example:
 
@@ -439,10 +439,10 @@ def restore(
 
         :param name: Name of model asset.
         :type name: str
-        :keyword version: Version of model asset.
-        :paramtype version: str
-        :keyword label: Label of the model asset. (mutually exclusive with version)
-        :paramtype label: str
+        :param version: Version of model asset.
+        :type version: str
+        :param label: Label of the model asset. (mutually exclusive with version)
+        :type label: str
 
         .. admonition:: Example:
 
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_deployment_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_deployment_operations.py
index aae7176b955f..b44642a64863 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_deployment_operations.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_deployment_operations.py
@@ -155,8 +155,8 @@ def begin_create_or_update(
             not skip_script_validation
             and deployment
             and deployment.code_configuration
-            and not deployment.code_configuration.code.startswith(ARM_ID_PREFIX)
-            and not re.match(AMLVersionedArmId.REGEX_PATTERN, deployment.code_configuration.code)
+            and not deployment.code_configuration.code.startswith(ARM_ID_PREFIX)  # type: ignore[union-attr]
+            and not re.match(AMLVersionedArmId.REGEX_PATTERN, deployment.code_configuration.code)  # type: ignore
         ):
             validate_scoring_script(deployment)
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_endpoint_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_endpoint_operations.py
index 663212ca216a..a9a21ef80ffb 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_endpoint_operations.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_endpoint_operations.py
@@ -295,7 +295,7 @@ def invoke(
         self,
         endpoint_name: str,
         *,
-        request_file: Any = None,
+        request_file: Optional[str] = None,
         deployment_name: Optional[str] = None,  # pylint: disable=unused-argument
         input_data: Optional[Union[str, Data]] = None,
@@ -329,7 +329,7 @@ def invoke(
         if deployment_name:
             self._validate_deployment_name(endpoint_name, deployment_name)
 
-        with open(request_file, "rb") as f:
+        with open(request_file, "rb") as f:  # type: ignore[arg-type]
            data = json.loads(f.read())
         if local:
             return self._local_endpoint_helper.invoke(
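Note: the docstring edits in `_model_operations.py` (and, later, `_workspace_connections_operations.py`) keep the Sphinx field style in step with the signature: `:param:`/`:type:` for positional-or-keyword parameters, `:keyword:`/`:paramtype:` for keyword-only ones. A minimal sketch of the convention — the function itself is invented for illustration:

    from typing import Optional

    def archive_asset(name: str, *, version: Optional[str] = None) -> None:
        """Archive an asset.

        :param name: Name of the asset; positional-or-keyword, so :param:/:type:.
        :type name: str
        :keyword version: Version of the asset; keyword-only (after the bare *),
            so :keyword:/:paramtype:.
        :paramtype version: Optional[str]
        """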
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_schedule_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_schedule_operations.py
index d00cdf6c8fd6..5fdc9de70f3e 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_schedule_operations.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_schedule_operations.py
@@ -380,14 +380,14 @@ def _resolve_monitor_schedule_arm_id( # pylint:disable=too-many-branches,too-ma
         # resolve ARM id for each signal and populate any defaults if needed
         for signal_name, signal in schedule.create_monitor.monitoring_signals.items():  # type: ignore
             if signal.type == MonitorSignalType.GENERATION_SAFETY_QUALITY:
-                for llm_data in signal.production_data:
+                for llm_data in signal.production_data:  # type: ignore[union-attr]
                     self._job_operations._resolve_job_input(llm_data.input_data, schedule._base_path)
                 continue
             if signal.type == MonitorSignalType.CUSTOM:
-                if signal.inputs:
-                    for inputs in signal.inputs.values():
+                if signal.inputs:  # type: ignore[union-attr]
+                    for inputs in signal.inputs.values():  # type: ignore[union-attr]
                         self._job_operations._resolve_job_input(inputs, schedule._base_path)
-                for data in signal.input_data.values():
+                for data in signal.input_data.values():  # type: ignore[union-attr]
                     if data.input_data is not None:
                         for inputs in data.input_data.values():
                             self._job_operations._resolve_job_input(inputs, schedule._base_path)
@@ -397,14 +397,14 @@ def _resolve_monitor_schedule_arm_id( # pylint:disable=too-many-branches,too-ma
                 )
                 continue
             error_messages = []
-            if not signal.production_data or not signal.reference_data:
+            if not signal.production_data or not signal.reference_data:  # type: ignore[union-attr]
                 # if there is no target dataset, we check the type of signal
                 if signal.type in {MonitorSignalType.DATA_DRIFT, MonitorSignalType.DATA_QUALITY}:
                     if mdc_input_enabled:
-                        if not signal.production_data:
+                        if not signal.production_data:  # type: ignore[union-attr]
                             # if target dataset is absent and data collector for input is enabled,
                             # create a default target dataset with production model inputs as target
-                            signal.production_data = ProductionData(
+                            signal.production_data = ProductionData(  # type: ignore[union-attr]
                                 input_data=Input(
                                     path=f"{model_inputs_name}:{model_inputs_version}",
                                     type=self._data_operations.get(model_inputs_name, model_inputs_version).type,
@@ -414,8 +414,8 @@ def _resolve_monitor_schedule_arm_id( # pylint:disable=too-many-branches,too-ma
                                 lookback_window_size="default", lookback_window_offset="P0D"
                             ),
                         )
-                        if not signal.reference_data:
-                            signal.reference_data = ReferenceData(
+                        if not signal.reference_data:  # type: ignore[union-attr]
+                            signal.reference_data = ReferenceData(  # type: ignore[union-attr]
                                 input_data=Input(
                                     path=f"{model_inputs_name}:{model_inputs_version}",
                                     type=self._data_operations.get(model_inputs_name, model_inputs_version).type,
@@ -425,7 +425,9 @@ def _resolve_monitor_schedule_arm_id( # pylint:disable=too-many-branches,too-ma
                                 lookback_window_size="default", lookback_window_offset="default"
                             ),
                         )
-                    elif not mdc_input_enabled and not (signal.production_data and signal.reference_data):
+                    elif not mdc_input_enabled and not (
+                        signal.production_data and signal.reference_data  # type: ignore[union-attr]
+                    ):
                         # if target or baseline dataset is absent and data collector for input is not enabled,
                         # collect exception message
                         msg = (
@@ -436,10 +438,10 @@ def _resolve_monitor_schedule_arm_id( # pylint:disable=too-many-branches,too-ma
                         error_messages.append(msg)
                 elif signal.type == MonitorSignalType.PREDICTION_DRIFT:
                     if mdc_output_enabled:
-                        if not signal.production_data:
+                        if not signal.production_data:  # type: ignore[union-attr]
                             # if target dataset is absent and data collector for output is enabled,
                             # create a default target dataset with production model outputs as target
-                            signal.production_data = ProductionData(
+                            signal.production_data = ProductionData(  # type: ignore[union-attr]
                                 input_data=Input(
                                     path=f"{model_outputs_name}:{model_outputs_version}",
                                     type=self._data_operations.get(model_outputs_name, model_outputs_version).type,
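Note: the hunks above and below all implement one defaulting rule: when a monitoring signal is missing its production or reference dataset, fall back to the model-data-collector (MDC) assets if collection is enabled, otherwise record an error message. A condensed sketch of that control flow, using simplified dataclasses as stand-ins for the SDK's signal types:

    from dataclasses import dataclass
    from typing import List, Optional

    @dataclass
    class SignalData:
        path: str

    @dataclass
    class Signal:
        production_data: Optional[SignalData] = None
        reference_data: Optional[SignalData] = None

    def apply_defaults(signal: Signal, mdc_enabled: bool, mdc_path: str) -> List[str]:
        """Fill missing datasets from MDC assets, or report why we cannot."""
        errors: List[str] = []
        if not (signal.production_data and signal.reference_data):
            if mdc_enabled:
                # Default any missing side to the collector's dataset.
                signal.production_data = signal.production_data or SignalData(mdc_path)
                signal.reference_data = signal.reference_data or SignalData(mdc_path)
            else:
                errors.append("Data collector is disabled and no dataset was provided.")
        return errors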
@@ -449,8 +451,8 @@ def _resolve_monitor_schedule_arm_id( # pylint:disable=too-many-branches,too-ma
                                 lookback_window_size="default", lookback_window_offset="P0D"
                             ),
                         )
-                        if not signal.reference_data:
-                            signal.reference_data = ReferenceData(
+                        if not signal.reference_data:  # type: ignore[union-attr]
+                            signal.reference_data = ReferenceData(  # type: ignore[union-attr]
                                 input_data=Input(
                                     path=f"{model_outputs_name}:{model_outputs_version}",
                                     type=self._data_operations.get(model_outputs_name, model_outputs_version).type,
@@ -460,7 +462,9 @@ def _resolve_monitor_schedule_arm_id( # pylint:disable=too-many-branches,too-ma
                                 lookback_window_size="default", lookback_window_offset="default"
                             ),
                         )
-                    elif not mdc_output_enabled and not (signal.production_data and signal.reference_data):
+                    elif not mdc_output_enabled and not (
+                        signal.production_data and signal.reference_data  # type: ignore[union-attr]
+                    ):
                         # if target dataset is absent and data collector for output is not enabled,
                         # collect exception message
                         msg = (
@@ -471,10 +475,10 @@ def _resolve_monitor_schedule_arm_id( # pylint:disable=too-many-branches,too-ma
                         error_messages.append(msg)
                 elif signal.type == MonitorSignalType.FEATURE_ATTRIBUTION_DRIFT:
                     if mdc_input_enabled:
-                        if not signal.production_data:
+                        if not signal.production_data:  # type: ignore[union-attr]
                             # if production dataset is absent and data collector for input is enabled,
                             # create a default prod dataset with production model inputs and outputs as target
-                            signal.production_data = [
+                            signal.production_data = [  # type: ignore[union-attr]
                                 FADProductionData(
                                     input_data=Input(
                                         path=f"{model_inputs_name}:{model_inputs_version}",
@@ -496,7 +500,7 @@ def _resolve_monitor_schedule_arm_id( # pylint:disable=too-many-branches,too-ma
                                     ),
                                 ),
                             ]
-                    elif not mdc_output_enabled and not signal.production_data:
+                    elif not mdc_output_enabled and not signal.production_data:  # type: ignore[union-attr]
                         # if target dataset is absent and data collector for output is not enabled,
                         # collect exception message
                         msg = (
@@ -516,27 +520,32 @@ def _resolve_monitor_schedule_arm_id( # pylint:disable=too-many-branches,too-ma
                     ErrorCategory=ErrorCategory.USER_ERROR,
                 )
             if signal.type == MonitorSignalType.FEATURE_ATTRIBUTION_DRIFT:
-                for prod_data in signal.production_data:
+                for prod_data in signal.production_data:  # type: ignore[union-attr]
                     self._job_operations._resolve_job_input(prod_data.input_data, schedule._base_path)
-                    prod_data.pre_processing_component = self._orchestrators.get_asset_arm_id(
-                        asset=prod_data.pre_processing_component, azureml_type=AzureMLResourceType.COMPONENT
+                    prod_data.pre_processing_component = self._orchestrators.get_asset_arm_id(  # type: ignore
+                        asset=prod_data.pre_processing_component,  # type: ignore[union-attr]
+                        azureml_type=AzureMLResourceType.COMPONENT,
                     )
-                self._job_operations._resolve_job_input(signal.reference_data.input_data, schedule._base_path)
-                signal.reference_data.pre_processing_component = self._orchestrators.get_asset_arm_id(
-                    asset=signal.reference_data.pre_processing_component, azureml_type=AzureMLResourceType.COMPONENT
+                self._job_operations._resolve_job_input(
+                    signal.reference_data.input_data, schedule._base_path  # type: ignore[union-attr]
+                )
+                signal.reference_data.pre_processing_component = self._orchestrators.get_asset_arm_id(  # type: ignore
+                    asset=signal.reference_data.pre_processing_component,  # type: ignore[union-attr]
+                    azureml_type=AzureMLResourceType.COMPONENT,
                 )
                 continue
             self._job_operations._resolve_job_inputs(
-                [signal.production_data.input_data, signal.reference_data.input_data],
+                [signal.production_data.input_data, signal.reference_data.input_data],  # type: ignore[union-attr]
                 schedule._base_path,
             )
-            signal.production_data.pre_processing_component = self._orchestrators.get_asset_arm_id(
-                asset=signal.production_data.pre_processing_component,
+            signal.production_data.pre_processing_component = self._orchestrators.get_asset_arm_id(  # type: ignore
+                asset=signal.production_data.pre_processing_component,  # type: ignore[union-attr]
                 azureml_type=AzureMLResourceType.COMPONENT,
             )
-            signal.reference_data.pre_processing_component = self._orchestrators.get_asset_arm_id(
-                asset=signal.reference_data.pre_processing_component, azureml_type=AzureMLResourceType.COMPONENT
+            signal.reference_data.pre_processing_component = self._orchestrators.get_asset_arm_id(  # type: ignore
+                asset=signal.reference_data.pre_processing_component,  # type: ignore[union-attr]
+                azureml_type=AzureMLResourceType.COMPONENT,
             )
 
     def _process_and_get_endpoint_deployment_names_from_id(self, target: MonitoringTarget) -> Tuple:
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_workspace_connections_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_workspace_connections_operations.py
index 81356c5977e3..41e6049b6206 100644
--- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_workspace_connections_operations.py
+++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_workspace_connections_operations.py
@@ -131,16 +131,17 @@ def delete(self, name: str) -> None:
     @monitor_with_activity(ops_logger, "WorkspaceConnections.List", ActivityType.PUBLICAPI)
     def list(
         self,
+        *,
         connection_type: Optional[str] = None,
         include_data_connections: bool = False,
         **kwargs: Any,
     ) -> Iterable[WorkspaceConnection]:
         """List all workspace connections for a workspace.
 
-        :param connection_type: Type of workspace connection to list.
-        :type connection_type: Optional[str]
-        :param include_data_connections: If true, also return data connections. Defaults to False.
-        :type include_data_connections: bool
+        :keyword connection_type: Type of workspace connection to list.
+        :paramtype connection_type: Optional[str]
+        :keyword include_data_connections: If true, also return data connections. Defaults to False.
+        :paramtype include_data_connections: bool
         :return: An iterator like instance of workspace connection objects
         :rtype: Iterable[~azure.ai.ml.entities.WorkspaceConnection]
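Note: adding the bare `*` to `list()` makes `connection_type` and `include_data_connections` keyword-only, which is why the docstring fields switch to `:keyword:`/`:paramtype:`. A quick sketch of the call-site effect, using a hypothetical stand-in function with the same shape rather than the real operations class:

    from typing import Optional

    def list_connections(*, connection_type: Optional[str] = None,
                         include_data_connections: bool = False) -> None:
        """Stand-in with the same keyword-only signature as the updated list()."""

    list_connections(connection_type="git", include_data_connections=True)  # OK: named arguments
    # list_connections("git", True)  # TypeError: takes 0 positional arguments

Requiring names at the call site keeps intent explicit and lets parameters be reordered or extended later without silently breaking positional callers.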