Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,10 @@
from ._models import Userargs
from ._hd_insight_job_client_enums import (
ApplicationState,
JobState,
SessionJobKind,
StatementState,
StatementExecutionStatus,
)

__all__ = [
Expand Down Expand Up @@ -87,5 +90,8 @@
'Status',
'Userargs',
'ApplicationState',
'JobState',
'SessionJobKind',
'StatementState',
'StatementExecutionStatus',
]
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,41 @@ class ApplicationState(str, Enum):
killed = "KILLED"


class JobState(str, Enum):
    """State of a Spark batch or session job.

    Values are the raw lowercase state strings reported by the service
    (presumably Livy states, given the "livy id" wording used by the
    related models — TODO confirm), covering the lifecycle from
    ``not_started`` through to terminal states such as ``success``,
    ``error``, ``dead`` and ``killed``.
    """

    not_started = "not_started"
    starting = "starting"
    idle = "idle"
    running = "running"
    busy = "busy"
    shutting_down = "shutting_down"
    error = "error"
    dead = "dead"
    killed = "killed"
    success = "success"
    recovering = "recovering"


class SessionJobKind(str, Enum):
    """Kind of code a Spark session job executes: Scala Spark,
    PySpark, SparkR, or Spark SQL.
    """

    spark = "spark"
    pyspark = "pyspark"
    sparkr = "sparkr"
    sql = "sql"


class StatementState(str, Enum):
    """State of an individual Spark statement within a session,
    from ``waiting`` (queued) through ``running`` to a terminal
    ``available``, ``error`` or ``cancelled`` state.
    """

    waiting = "waiting"
    running = "running"
    available = "available"
    error = "error"
    cancelling = "cancelling"
    cancelled = "cancelled"


class StatementExecutionStatus(str, Enum):
    """Outcome of a Spark statement's execution, as reported in the
    statement's output (``ok`` on success, ``error`` on failure,
    ``abort`` when execution was aborted).
    """

    ok = "ok"
    error = "error"
    abort = "abort"
160 changes: 86 additions & 74 deletions sdk/azure-hdinsight-job/azure/hdinsight/job/models/_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -224,15 +224,17 @@ def __init__(self, **kwargs):
class SparkBatchJob(Model):
"""SparkBatchJob.

:param id:
:param id: The livy id of the spark batch job.
:type id: int
:param app_id:
:param app_id: The application id of this job.
:type app_id: str
:param app_info:
:param app_info: The detailed application info.
:type app_info: dict[str, str]
:param state:
:type state: str
:param log_lines:
:param state: The current state of the spark batch job. Possible values
include: 'not_started', 'starting', 'idle', 'running', 'busy',
'shutting_down', 'error', 'dead', 'killed', 'success', 'recovering'
:type state: str or ~azure.hdinsight.job.models.JobState
:param log_lines: The log lines.
:type log_lines: list[str]
"""

Expand All @@ -256,11 +258,11 @@ def __init__(self, **kwargs):
class SparkBatchJobCollection(Model):
"""SparkBatchJobCollection.

:param from_property:
:param from_property: The start index to fetch Spark Batch jobs.
:type from_property: int
:param total:
:param total: Number of Spark Batch jobs to fetch.
:type total: int
:param sessions:
:param sessions: List of spark batch jobs.
:type sessions: list[~azure.hdinsight.job.models.SparkBatchJob]
"""

Expand All @@ -280,37 +282,37 @@ def __init__(self, **kwargs):
class SparkBatchJobRequest(Model):
"""SparkBatchJobRequest.

:param file:
:param file: File containing the application to execute.
:type file: str
:param proxy_user:
:param proxy_user: User to impersonate when running the job.
:type proxy_user: str
:param class_name:
:param class_name: Application Java/Spark main class.
:type class_name: str
:param arguments:
:param arguments: Command line arguments for the application.
:type arguments: list[str]
:param jars:
:param jars: Jars to be used in this batch job.
:type jars: list[str]
:param python_files:
:param python_files: Python files to be used in this batch job.
:type python_files: list[str]
:param files:
:param files: Files to be used in this batch job.
:type files: list[str]
:param driver_memory:
:param driver_memory: Amount of memory to use for the driver process.
:type driver_memory: str
:param driver_cores:
:param driver_cores: Number of cores to use for the driver process.
:type driver_cores: int
:param executor_memory:
:param executor_memory: Amount of memory to use per executor process.
:type executor_memory: str
:param executor_cores:
:param executor_cores: Number of cores to use for each executor.
:type executor_cores: int
:param executor_count:
:param executor_count: Number of executors to launch for this batch job.
:type executor_count: int
:param archives:
:param archives: Archives to be used in this batch job.
:type archives: list[str]
:param queue:
:param queue: The name of the YARN queue to which submitted.
:type queue: str
:param name:
:param name: The name of this batch job.
:type name: str
:param configuration:
:param configuration: Spark configuration properties.
:type configuration: dict[str, str]
"""

Expand Down Expand Up @@ -372,15 +374,15 @@ def __init__(self, **kwargs):
class SparkJobLog(Model):
"""SparkJobLog.

:param id:
:param id: The livy id of the spark job.
:type id: int
:param from_property:
:param from_property: Offset from start of log.
:type from_property: int
:param size:
:param size: Max number of log lines.
:type size: int
:param total:
:param total: Total number of log lines.
:type total: long
:param log_lines:
:param log_lines: The log lines.
:type log_lines: list[str]
"""

Expand All @@ -404,10 +406,12 @@ def __init__(self, **kwargs):
class SparkJobState(Model):
"""SparkJobState.

:param id:
:param id: The livy id of the spark job.
:type id: int
:param state:
:type state: str
:param state: The current state of the spark job. Possible values include:
'not_started', 'starting', 'idle', 'running', 'busy', 'shutting_down',
'error', 'dead', 'killed', 'success', 'recovering'
:type state: str or ~azure.hdinsight.job.models.JobState
"""

_attribute_map = {
Expand All @@ -424,11 +428,11 @@ def __init__(self, **kwargs):
class SparkSessionCollection(Model):
"""SparkSessionCollection.

:param from_property:
:param from_property: The start index to fetch spark sessions.
:type from_property: int
:param total:
:param total: Number of spark sessions to fetch.
:type total: int
:param sessions:
:param sessions: List of spark sessions.
:type sessions: list[~azure.hdinsight.job.models.SparkSessionJob]
"""

Expand All @@ -448,21 +452,24 @@ def __init__(self, **kwargs):
class SparkSessionJob(Model):
"""SparkSessionJob.

:param id:
:param id: The livy id of the spark session job.
:type id: int
:param app_id:
:param app_id: The application id of this job.
:type app_id: str
:param owner:
:param owner: Remote user who submitted this job.
:type owner: str
:param proxy_user:
:param proxy_user: User to impersonate when running.
:type proxy_user: str
:param kind:
:type kind: str
:param log_lines:
:param kind: Spark session job kind. Possible values include: 'spark',
'pyspark', 'sparkr', 'sql'
:type kind: str or ~azure.hdinsight.job.models.SessionJobKind
:param log_lines: The log lines.
:type log_lines: list[str]
:param state:
:type state: str
:param app_info:
:param state: The current state of the spark session job. Possible values
include: 'not_started', 'starting', 'idle', 'running', 'busy',
'shutting_down', 'error', 'dead', 'killed', 'success', 'recovering'
:type state: str or ~azure.hdinsight.job.models.JobState
:param app_info: The detailed application info.
:type app_info: dict[str, str]
"""

Expand Down Expand Up @@ -492,35 +499,37 @@ def __init__(self, **kwargs):
class SparkSessionJobRequest(Model):
"""SparkSessionJobRequest.

:param kind: Possible values include: 'spark', 'pyspark', 'sparkr', 'sql'
:param kind: Spark session job kind. Possible values include: 'spark',
'pyspark', 'sparkr', 'sql'
:type kind: str or ~azure.hdinsight.job.models.SessionJobKind
:param proxy_user:
:param proxy_user: User to impersonate when starting the session.
:type proxy_user: str
:param jars:
:param jars: Jars to be used in this session.
:type jars: list[str]
:param python_files:
:param python_files: Python files to be used in this session.
:type python_files: list[str]
:param files:
:param files: Files to be used in this session.
:type files: list[str]
:param driver_memory:
:param driver_memory: Amount of memory to use for the driver process.
:type driver_memory: str
:param driver_cores:
:param driver_cores: Number of cores to use for the driver process.
:type driver_cores: int
:param executor_memory:
:param executor_memory: Amount of memory to use per executor process.
:type executor_memory: str
:param executor_cores:
:param executor_cores: Number of cores to use for each executor.
:type executor_cores: int
:param executor_count:
:param executor_count: Number of executors to launch for this session.
:type executor_count: int
:param archives:
:param archives: Archives to be used in this session.
:type archives: list[str]
:param queue:
:param queue: The name of the YARN queue to which submitted.
:type queue: str
:param name:
:param name: The name of this session.
:type name: str
:param configuration:
:param configuration: Spark configuration properties.
:type configuration: dict[str, str]
:param heartbeat_timeout_in_second:
:param heartbeat_timeout_in_second: Timeout in second to which session be
orphaned.
:type heartbeat_timeout_in_second: int
"""

Expand Down Expand Up @@ -564,15 +573,17 @@ def __init__(self, **kwargs):
class SparkStatement(Model):
"""SparkStatement.

:param id:
:param id: The livy id of the spark statement job.
:type id: int
:param code:
:param code: The execution code.
:type code: str
:param state:
:type state: str
:param output:
:param state: The current state of the spark statement. Possible values
include: 'waiting', 'running', 'available', 'error', 'cancelling',
'cancelled'
:type state: str or ~azure.hdinsight.job.models.StatementState
:param output: The execution output.
:type output: ~azure.hdinsight.job.models.SparkStatementOutput
:param progress:
:param progress: The execution progress.
:type progress: float
"""

Expand Down Expand Up @@ -612,7 +623,7 @@ def __init__(self, **kwargs):
class SparkStatementCollection(Model):
"""SparkStatementCollection.

:param statements:
:param statements: List of spark statements.
:type statements: list[~azure.hdinsight.job.models.SparkStatement]
"""

Expand All @@ -628,11 +639,12 @@ def __init__(self, **kwargs):
class SparkStatementOutput(Model):
"""SparkStatementOutput.

:param status:
:type status: str
:param execution_count:
:param status: Execution status. Possible values include: 'ok', 'error',
'abort'
:type status: str or ~azure.hdinsight.job.models.StatementExecutionStatus
:param execution_count: A monotonically increasing number.
:type execution_count: int
:param data:
:param data: Statement output.
:type data: object
"""

Expand All @@ -654,8 +666,8 @@ class SparkStatementRequest(Model):

:param code:
:type code: str
:param kind:
:type kind: str
:param kind: Possible values include: 'spark', 'pyspark', 'sparkr', 'sql'
:type kind: str or ~azure.hdinsight.job.models.SessionJobKind
"""

_attribute_map = {
Expand Down
Loading