Merged. Changes from 17 commits.
@@ -32,6 +32,14 @@ def get_sql_recommended_elastic_pools_operations(kwargs):
return get_sql_management_client(kwargs).recommended_elastic_pools


def get_sql_database_blob_auditing_policies_operations(kwargs):
return get_sql_management_client(kwargs).database_blob_auditing_policies


def get_sql_database_threat_detection_policies_operations(kwargs):
return get_sql_management_client(kwargs).database_threat_detection_policies


# COMMANDS UTILITIES

def create_service_adapter(service_model, service_class):
@@ -86,7 +94,9 @@ def custom_command(self, name, custom_func_name, confirmation=None):
client_factory=self._client_factory,
confirmation=confirmation)

def generic_update_command(self, name, getter_op, setter_op, custom_func_name=None):
# pylint: disable=too-many-arguments
def generic_update_command(self, name, getter_op, setter_op, custom_func_name=None,
setter_arg_name='parameters'):
if custom_func_name:
custom_function_op = self._custom_path.format(custom_func_name)
else:
@@ -98,7 +108,8 @@ def generic_update_command(self, name, getter_op, setter_op, custom_func_name=No
self._service_adapter(getter_op),
self._service_adapter(setter_op),
factory=self._client_factory,
custom_function_op=custom_function_op)
custom_function_op=custom_function_op,
setter_arg_name=setter_arg_name)


# PARAMETERS UTILITIES
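
The new setter_arg_name parameter above exists because the SDK setters registered later in this PR (create_or_update on the policy operations) take the policy object under an operation-specific keyword such as database_blob_auditing_policy instead of the conventional parameters. A minimal sketch of the idea, not the actual azure-cli generic update implementation, with the helper name _generic_update_sketch invented for illustration:

def _generic_update_sketch(getter, setter, apply_changes, setter_arg_name='parameters', **ids):
    # GET the current resource, apply the edits, then PUT it back under
    # whatever keyword argument the SDK setter expects.
    instance = apply_changes(getter(**ids))
    kwargs = dict(ids)
    kwargs[setter_arg_name] = instance
    return setter(**kwargs)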
@@ -3,8 +3,14 @@
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

from ._util import (get_sql_servers_operation, get_sql_database_operations,
get_sql_elasticpools_operations, create_service_adapter, ServiceGroup)
from ._util import (
get_sql_servers_operation,
get_sql_database_operations,
get_sql_database_blob_auditing_policies_operations,
get_sql_database_threat_detection_policies_operations,
get_sql_elasticpools_operations,
Member: Are those related to the change?

Contributor Author: Yes, these functions are used below.

create_service_adapter,
ServiceGroup)

###############################################
# sql db #
@@ -59,6 +65,33 @@
# c.command('list', 'list_service_tier_advisors')
# c.command('show', 'get_service_tier_advisor')

database_blob_auditing_policy_operations = create_service_adapter(
'azure.mgmt.sql.operations.database_blob_auditing_policies_operations',
'DatabaseBlobAuditingPoliciesOperations')

with ServiceGroup(__name__,
get_sql_database_blob_auditing_policies_operations,
database_blob_auditing_policy_operations) as s:
with s.group('sql db audit-policy') as c:
c.command('show', 'get')
c.generic_update_command(
'update', 'get', 'create_or_update',
custom_func_name='db_audit_policy_update',
setter_arg_name='database_blob_auditing_policy')

database_threat_detection_policy_operations = create_service_adapter(
'azure.mgmt.sql.operations.database_threat_detection_policies_operations',
'DatabaseThreatDetectionPoliciesOperations')

with ServiceGroup(__name__,
get_sql_database_threat_detection_policies_operations,
database_threat_detection_policy_operations) as s:
with s.group('sql db threat-policy') as c:
c.command('show', 'get')
c.generic_update_command('update', 'get', 'create_or_update',
custom_func_name='db_threat_detection_policy_update',
setter_arg_name='database_security_alert_policy')

###############################################
# sql elastic-pool #
###############################################
@@ -8,14 +8,23 @@
get_sql_elasticpools_operations
)

from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.commands.client_factory import (
get_mgmt_service_client,
get_subscription_id)
from azure.cli.core._util import CLIError
from azure.mgmt.sql.models.sql_management_client_enums import (
BlobAuditingPolicyState,
CreateMode,
DatabaseEditions,
ReplicationRole,
ServiceObjectiveName,
SecurityAlertPolicyState,
ServiceObjectiveName
)
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.storage import StorageManagementClient

# url parse package has different names in Python 2 and 3. 'six' package works cross-version.
from six.moves.urllib.parse import (quote, urlparse) # pylint: disable=import-error

###############################################
# Common funcs #
@@ -98,8 +107,6 @@ def _db_create_special(
resource_group_name=dest_db.resource_group_name)

# Set create mode properties
# url parse package has different names in Python 2 and 3. 'six' package works cross-version.
from six.moves.urllib.parse import quote # pylint: disable=import-error
subscription_id = get_subscription_id()
kwargs['source_database_id'] = (
'/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Sql/servers/{}/databases/{}'
@@ -336,6 +343,196 @@ def db_update(
return instance


#####
# sql db audit-policy & threat-policy
#####


# Finds a storage account's resource group by querying ARM resource cache.
# Why do we have to do this: so we know the resource group in order to later query the storage API
# to determine the account's keys and endpoint. Why isn't this just a command line parameter:
# because if it was a command line parameter then the customer would need to specify storage
# resource group just to update some unrelated property, which is annoying and makes no sense to
# the customer.
Contributor: +1

def _find_storage_account(name):
resource_type = 'Microsoft.Storage/storageAccounts'
Member: What about classic storage accounts?

Contributor Author: Azure CLI doesn't support creating classic storage accounts, so I have no way to create them for automated tests. On top of that, there is no Python client library, so I would need to hand-write the HTTP requests, exactly the kind of thing you would want to have automated. The customer can still use classic storage by specifying the storage endpoint and key instead of the account name, so classic storage isn't totally blocked, just inconvenient. I'm OK with this for the first release. I'm adding explicit checks.


client = get_mgmt_service_client(ResourceManagementClient)
resources = list(client.resources.list(
filter="name eq '{}' and resourceType eq '{}'"
.format(name, resource_type)))

if len(resources) == 0:
raise CLIError('No resource with name {} and type {} was found.'
.format(name, resource_type))

if len(resources) > 1:
raise CLIError('Multiple resources with name {} and type {} were found.'
.format(name, resource_type))

# Split the uri and return just the resource group
return resources[0].id.split('/')[4]


# Determines storage account name from endpoint url string.
# e.g. 'https://mystorage.blob.core.windows.net' -> 'mystorage'
def _get_storage_account_name(storage_endpoint):
return urlparse(storage_endpoint).netloc.split('.')[0]


# Gets storage account blob endpoint by querying the storage ARM API.
def _get_storage_endpoint(
storage_account,
resource_group_name):

# Get storage account
client = get_mgmt_service_client(StorageManagementClient)
account = client.storage_accounts.get_properties(
resource_group_name=resource_group_name,
account_name=storage_account)

# Get endpoint
return account.primary_endpoints.blob # pylint: disable=no-member
Member: There are storage accounts without a blob endpoint. We need to check for that and return a meaningful error asking the user to choose a different storage account.

Contributor Author: OK. I'm not sure how to create such a storage account, so I'm taking your word for it ;) I tested by replacing primary_endpoints.blob with primary_endpoints.potato and then handling that exception.
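
Per the comment above, a minimal sketch of such a check, assuming a hypothetical helper named _require_blob_endpoint that _get_storage_endpoint could call before returning (CLIError is already imported at the top of this module):

def _require_blob_endpoint(account, storage_account):
    # Hypothetical guard: some storage accounts expose no blob endpoint, so fail
    # with a clear message instead of surfacing an attribute error later.
    endpoint = getattr(account.primary_endpoints, 'blob', None)
    if not endpoint:
        raise CLIError(
            "Storage account '{}' has no blob endpoint. Choose a different storage "
            "account or specify --storage-endpoint explicitly.".format(storage_account))
    return endpoint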



# Gets storage account key by querying storage ARM API.
def _get_storage_key(
storage_account,
resource_group_name,
use_secondary_key):

# Get storage keys
client = get_mgmt_service_client(StorageManagementClient)
keys = client.storage_accounts.list_keys(
resource_group_name=resource_group_name,
account_name=storage_account)

# Choose storage key
index = 1 if use_secondary_key else 0
return keys.keys[index].value # pylint: disable=no-member


# Common code for updating audit and threat detection policy
def _db_security_policy_update( # pylint: disable=too-many-arguments
instance,
enabled,
storage_account,
storage_endpoint,
storage_account_access_key,
use_secondary_key):

# Validate storage endpoint arguments
if storage_endpoint is not None and storage_account is not None:
raise CLIError('--storage-endpoint and --storage-account cannot both be specified.')

# Set storage endpoint
if storage_endpoint is not None:
instance.storage_endpoint = storage_endpoint
if storage_account is not None:
storage_resource_group = _find_storage_account(storage_account)
Member: "_find_storage_account_resource_group"?

Contributor Author: Sure.

instance.storage_endpoint = _get_storage_endpoint(storage_account, storage_resource_group)

# Set storage access key
if storage_account_access_key is not None:
# Access key is specified
instance.storage_account_access_key = storage_account_access_key
elif enabled:
# Access key is not specified, but state is Enabled.
# If state is Enabled, then access key property is required in PUT. However access key is
# readonly (GET returns empty string for access key), so we need to determine the value
# and then PUT it back. (We don't want the user to be forced to specify this, because that
# would be very annoying when updating non-storage-related properties).
# This doesn't work if the user used generic update args, i.e. `--set state=Enabled`
# instead of `--state Enabled`, since the generic update args are applied after this custom
# function, but at least we tried.
if storage_account is None:
storage_account = _get_storage_account_name(instance.storage_endpoint)
storage_resource_group = _find_storage_account(storage_account)

instance.storage_account_access_key = _get_storage_key(
storage_account,
storage_resource_group,
use_secondary_key)


# Update audit policy. Custom update function to apply parameters to instance.
def db_audit_policy_update( # pylint: disable=too-many-arguments
instance,
state=None,
storage_account=None,
storage_endpoint=None,
storage_account_access_key=None,
audit_actions_and_groups=None,
retention_days=None):

# Apply state
if state is not None:
Member: Do we have parameter value validation? (e.g. in the "state" case the valid set is "Enabled" and "Disabled")

Contributor Author: Yes, it will be validated due to enum_choice_list in params.py:

(env) D:\git\azure-cli [datasecurity ≡]> az sql db audit-policy update --state adf
az sql db audit-policy update: error: argument --state: invalid choice: 'adf' (choose from 'Enabled', 'Disabled')

# pylint: disable=unsubscriptable-object
instance.state = BlobAuditingPolicyState[state.lower()]
enabled = instance.state.value.lower() == BlobAuditingPolicyState.enabled.value.lower()

# Set storage-related properties
_db_security_policy_update(
instance,
enabled,
storage_account,
storage_endpoint,
storage_account_access_key,
instance.is_storage_secondary_key_in_use)

# Set other properties
if audit_actions_and_groups is not None:
instance.audit_actions_and_groups = audit_actions_and_groups

if retention_days is not None:
instance.retention_days = retention_days

return instance
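
As a quick illustration of the contract these custom update functions follow (mutate the instance fetched by the getter, return it, and leave unspecified properties untouched), a stand-in object whose attribute names mirror the properties read above can be passed in place of the real SDK model:

class _StubAuditPolicy(object):
    # Hypothetical stand-in for the SDK audit policy model, for illustration only.
    def __init__(self):
        self.state = None
        self.storage_endpoint = None
        self.storage_account_access_key = None
        self.is_storage_secondary_key_in_use = False
        self.audit_actions_and_groups = None
        self.retention_days = None

stub = db_audit_policy_update(_StubAuditPolicy(), state='Disabled', retention_days=90)
assert stub.state.value == 'Disabled'
assert stub.retention_days == 90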


# Update threat detection policy. Custom update function to apply parameters to instance.
def db_threat_detection_policy_update( # pylint: disable=too-many-arguments
instance,
state=None,
storage_account=None,
storage_endpoint=None,
storage_account_access_key=None,
retention_days=None,
email_addresses=None,
disabled_alerts=None,
email_account_admins=None):

# Apply state
if state is not None:
# pylint: disable=unsubscriptable-object
instance.state = SecurityAlertPolicyState[state.lower()]
enabled = instance.state.value.lower() == SecurityAlertPolicyState.enabled.value.lower()

# Set storage-related properties
_db_security_policy_update(
instance,
enabled,
storage_account,
storage_endpoint,
storage_account_access_key,
False)

# Set other properties
if retention_days is not None:
instance.retention_days = retention_days

if email_addresses is not None:
instance.email_addresses = ";".join(email_addresses)

if disabled_alerts is not None:
instance.disabled_alerts = ";".join(disabled_alerts)

if email_account_admins is not None:
instance.email_account_admins = email_account_admins

return instance


###############################################
# sql dw #
###############################################