+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: blockinfile
+author:
+ - 'YAEGASHI Takeshi (@yaegashi)'
+extends_documentation_fragment:
+ - files
+ - validate
+short_description: Insert/update/remove a text block
+ surrounded by marker lines.
+version_added: '2.0'
+description:
+ - This module will insert/update/remove a block of multi-line text
+ surrounded by customizable marker lines.
+notes:
+ - This module supports check mode.
+ - When using 'with_*' loops, be aware that if you do not set a unique marker the block will be overwritten on each iteration.
+options:
+ dest:
+ aliases: [ name, destfile ]
+ required: true
+ description:
+ - The file to modify.
+ state:
+ required: false
+ choices: [ present, absent ]
+ default: present
+ description:
+ - Whether the block should be there or not.
+ marker:
+ required: false
+ default: '# {mark} ANSIBLE MANAGED BLOCK'
+ description:
+ - The marker line template.
+ "{mark}" will be replaced with "BEGIN" or "END".
+ block:
+ aliases: [ content ]
+ required: false
+ default: ''
+ description:
+ - The text to insert inside the marker lines.
+ If it's missing or an empty string,
+ the block will be removed as if C(state) were set to C(absent).
+ insertafter:
+ required: false
+ default: EOF
+ description:
+ - If specified, the block will be inserted after the last match of the
+ specified regular expression. A special value, C(EOF), inserts the
+ block at the end of the file. If the specified regular expression has
+ no matches, C(EOF) will be used instead.
+ choices: [ 'EOF', '*regex*' ]
+ insertbefore:
+ required: false
+ default: None
+ description:
+ - If specified, the block will be inserted before the last match of the
+ specified regular expression. A special value, C(BOF), inserts the
+ block at the beginning of the file. If the specified regular
+ expression has no matches, the block will be inserted at the end of
+ the file.
+ choices: [ 'BOF', '*regex*' ]
+ create:
+ required: false
+ default: 'no'
+ choices: [ 'yes', 'no' ]
+ description:
+ - Create a new file if it doesn't exist.
+ backup:
+ required: false
+ default: 'no'
+ choices: [ 'yes', 'no' ]
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ follow:
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ description:
+ - 'This flag indicates that filesystem links, if they exist, should be followed.'
+ version_added: "2.1"
+"""
+
+EXAMPLES = r"""
+- name: insert/update "Match User" configuation block in /etc/ssh/sshd_config
+ blockinfile:
+ dest: /etc/ssh/sshd_config
+ block: |
+ Match User ansible-agent
+ PasswordAuthentication no
+
+- name: insert/update eth0 configuration stanza in /etc/network/interfaces
+ (it might be better to copy files into /etc/network/interfaces.d/)
+ blockinfile:
+ dest: /etc/network/interfaces
+ block: |
+ iface eth0 inet static
+ address 192.0.2.23
+ netmask 255.255.255.0
+
+- name: insert/update HTML surrounded by custom markers after <body> line
+ blockinfile:
+ dest: /var/www/html/index.html
+ marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
+ insertafter: "<body>"
+ content: |
+ <h1>Welcome to {{ansible_hostname}}</h1>
+ <p>Last updated on {{ansible_date_time.iso8601}}</p>
+
+- name: remove HTML as well as surrounding markers
+ blockinfile:
+ dest: /var/www/html/index.html
+ marker: ""
+ content: ""
+
+- name: Add mappings to /etc/hosts
+ blockinfile:
+ dest: /etc/hosts
+ block: |
+ {{item.ip}} {{item.name}}
+ marker: "# {mark} ANSIBLE MANAGED BLOCK {{item.name}}"
+ with_items:
+ - { name: host1, ip: 10.10.1.10 }
+ - { name: host2, ip: 10.10.1.11 }
+ - { name: host3, ip: 10.10.1.12 }
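+
+# An additional illustrative example (not part of the original module docs):
+# insert a block at the beginning of a file, creating the file if needed,
+# using the documented insertbefore=BOF and create options.
+- name: insert/update a managed notice at the top of /etc/motd
+ blockinfile:
+ dest: /etc/motd
+ insertbefore: BOF
+ create: yes
+ block: |
+ This system is managed by Ansible.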
+"""
+
+import re
+import os
+import tempfile
+from ansible.module_utils.six import b
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+def write_changes(module, contents, dest):
+
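+ # Write the new contents to a temporary file, optionally run the
+ # user-supplied validate command against it, and only then move it
+ # atomically over the destination.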
+ tmpfd, tmpfile = tempfile.mkstemp()
+ f = os.fdopen(tmpfd, 'wb')
+ f.write(contents)
+ f.close()
+
+ validate = module.params.get('validate', None)
+ valid = not validate
+ if validate:
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(validate % tmpfile)
+ valid = rc == 0
+ if rc != 0:
+ module.fail_json(msg='failed to validate: '
+ 'rc:%s error:%s' % (rc, err))
+ if valid:
+ module.atomic_move(tmpfile, dest, unsafe_writes=module.params['unsafe_writes'])
+
+
+def check_file_attrs(module, changed, message):
+
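+ # Apply the common file arguments (owner, group, mode, SELinux context)
+ # and fold any resulting change into the module's changed/message output.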
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_file_attributes_if_different(file_args, False):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(required=True, aliases=['name', 'destfile'], type='path'),
+ state=dict(default='present', choices=['absent', 'present']),
+ marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'),
+ block=dict(default='', type='str', aliases=['content']),
+ insertafter=dict(default=None),
+ insertbefore=dict(default=None),
+ create=dict(default=False, type='bool'),
+ backup=dict(default=False, type='bool'),
+ validate=dict(default=None, type='str'),
+ ),
+ mutually_exclusive=[['insertbefore', 'insertafter']],
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+
+ params = module.params
+ dest = params['dest']
+ if module.boolean(params.get('follow', None)):
+ dest = os.path.realpath(dest)
+
+ if os.path.isdir(dest):
+ module.fail_json(rc=256,
+ msg='Destination %s is a directory!' % dest)
+
+ path_exists = os.path.exists(dest)
+ if not path_exists:
+ if not module.boolean(params['create']):
+ module.fail_json(rc=257,
+ msg='Destination %s does not exist!' % dest)
+ original = None
+ lines = []
+ else:
+ f = open(dest, 'rb')
+ original = f.read()
+ f.close()
+ lines = original.splitlines()
+
+ insertbefore = params['insertbefore']
+ insertafter = params['insertafter']
+ block = to_bytes(params['block'])
+ marker = to_bytes(params['marker'])
+ present = params['state'] == 'present'
+
+ if not present and not path_exists:
+ module.exit_json(changed=False, msg="File not present")
+
+ if insertbefore is None and insertafter is None:
+ insertafter = 'EOF'
+
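+ # Precompile the user-supplied anchor regex, if any; the special values
+ # EOF and BOF are handled positionally below rather than as patterns.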
+ if insertafter not in (None, 'EOF'):
+ insertre = re.compile(insertafter)
+ elif insertbefore not in (None, 'BOF'):
+ insertre = re.compile(insertbefore)
+ else:
+ insertre = None
+
+ marker0 = re.sub(b(r'{mark}'), b('BEGIN'), marker)
+ marker1 = re.sub(b(r'{mark}'), b('END'), marker)
+ if present and block:
+ # Escape sequences like '\n' need to be handled in Ansible 1.x
+ if module.ansible_version.startswith('1.'):
+ block = re.sub('', block, '')
+ blocklines = [marker0] + block.splitlines() + [marker1]
+ else:
+ blocklines = []
+
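+ # Scan for an existing marker pair; n0/n1 record the indices of the
+ # BEGIN/END marker lines so the managed block can be replaced in place.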
+ n0 = n1 = None
+ for i, line in enumerate(lines):
+ if line == marker0:
+ n0 = i
+ if line == marker1:
+ n1 = i
+
+ if None in (n0, n1):
+ n0 = None
+ if insertre is not None:
+ for i, line in enumerate(lines):
+ if insertre.search(line):
+ n0 = i
+ if n0 is None:
+ n0 = len(lines)
+ elif insertafter is not None:
+ n0 += 1
+ elif insertbefore is not None:
+ n0 = 0 # insertbefore=BOF
+ else:
+ n0 = len(lines) # insertafter=EOF
+ elif n0 < n1:
+ lines[n0:n1+1] = []
+ else:
+ lines[n1:n0+1] = []
+ n0 = n1
+
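+ # Splice the new block (empty when removing) at the computed position.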
+ lines[n0:n0] = blocklines
+
+ if lines:
+ result = b('\n').join(lines)
+ if original is None or original.endswith(b('\n')):
+ result += b('\n')
+ else:
+ result = b('')
+ if original == result:
+ msg = ''
+ changed = False
+ elif original is None:
+ msg = 'File created'
+ changed = True
+ elif not blocklines:
+ msg = 'Block removed'
+ changed = True
+ else:
+ msg = 'Block inserted'
+ changed = True
+
+ if changed and not module.check_mode:
+ if module.boolean(params['backup']) and path_exists:
+ module.backup_local(dest)
+ write_changes(module, result, dest)
+
+ if module.check_mode and not path_exists:
+ module.exit_json(changed=changed, msg=msg)
+
+ msg, changed = check_file_attrs(module, changed, msg)
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/files/patch.py b/files/patch.py
index 576333c38f8..c5aecf4e0d4 100644
--- a/files/patch.py
+++ b/files/patch.py
@@ -19,6 +19,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: patch
@@ -46,17 +50,17 @@
src:
description:
- Path of the patch file as accepted by the GNU patch tool. If
- C(remote_src) is False, the patch source file is looked up from the
+ C(remote_src) is 'no', the patch source file is looked up from the
module's "files" directory.
required: true
aliases: [ "patchfile" ]
remote_src:
description:
- - If False, it will search for src at originating/master machine, if True it will
- go to the remote/target machine for the src. Default is False.
- choices: [ "True", "False" ]
+ - If C(no), it will search for src at originating/master machine, if C(yes) it will
+ go to the remote/target machine for the src. Default is C(no).
+ choices: [ "yes", "no" ]
required: false
- default: "False"
+ default: "no"
strip:
description:
- Number that indicates the smallest prefix containing leading slashes
@@ -70,15 +74,17 @@
description:
- passes --backup --version-control=numbered to patch,
producing numbered backup copies
+ choices: [ 'yes', 'no' ]
+ default: 'no'
binary:
version_added: "2.0"
description:
- - Setting to true will disable patch's heuristic for transforming CRLF
+ - Setting to C(yes) will disable patch's heuristic for transforming CRLF
line endings into LF. Line endings of src and dest must match. If set to
- False, patch will replace CRLF in src files on POSIX.
+ C(no), patch will replace CRLF in src files on POSIX.
required: false
type: "bool"
- default: "False"
+ default: "no"
note:
- This module requires GNU I(patch) utility to be installed on the remote host.
'''
@@ -183,11 +189,14 @@ def main():
apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip,
dry_run=module.check_mode, backup=p.backup )
changed = True
- except PatchError, e:
+ except PatchError:
+ e = get_exception()
module.fail_json(msg=str(e))
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/files/tempfile.py b/files/tempfile.py
new file mode 100644
index 00000000000..021c88dbbb1
--- /dev/null
+++ b/files/tempfile.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016 Krzysztof Magosa
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: tempfile
+version_added: "2.3"
+author:
+ - Krzysztof Magosa
+short_description: Creates temporary files and directories.
+description:
+ - The M(tempfile) module creates temporary files and directories. The C(mktemp) command takes different parameters on various systems; this module helps to avoid troubles related to that.
+ - Files and directories created by this module are accessible only by their creator. If you need to make them world-accessible, use the M(file) module.
+options:
+ state:
+ description:
+ - Whether to create file or directory.
+ required: false
+ choices: [ "file", "directory" ]
+ default: file
+ path:
+ description:
+ - Location where the temporary file or directory should be created. If the path is not specified, the default system temporary directory will be used.
+ required: false
+ default: null
+ prefix:
+ description:
+ - Prefix of the file/directory name created by the module.
+ required: false
+ default: ansible.
+ suffix:
+ description:
+ - Suffix of the file/directory name created by the module.
+ required: false
+ default: ""
+'''
+
+EXAMPLES = """
+- name: create temporary build directory
+ tempfile:
+ state: directory
+ suffix: build
+
+- name: create temporary file
+ tempfile:
+ state: file
+ suffix: temp
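+
+# An additional illustrative example (not part of the original docs):
+# create a temporary file under a specific directory with a custom prefix,
+# using only the documented path/prefix/state options.
+- name: create temporary file under /var/tmp
+ tempfile:
+ state: file
+ path: /var/tmp
+ prefix: build.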
+"""
+
+RETURN = '''
+path:
+ description: Path to the created file or directory.
+ returned: success
+ type: string
+ sample: "/tmp/ansible.bMlvdk"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from tempfile import mkstemp, mkdtemp
+from os import close
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default='file', choices=['file', 'directory']),
+ path = dict(default=None),
+ prefix = dict(default='ansible.'),
+ suffix = dict(default='')
+ )
+ )
+
+ try:
+ if module.params['state'] == 'file':
+ handle, path = mkstemp(
+ prefix=module.params['prefix'],
+ suffix=module.params['suffix'],
+ dir=module.params['path']
+ )
+ close(handle)
+ elif module.params['state'] == 'directory':
+ path = mkdtemp(
+ prefix=module.params['prefix'],
+ suffix=module.params['suffix'],
+ dir=module.params['path']
+ )
+
+ module.exit_json(changed=True, path=path)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+if __name__ == '__main__':
+ main()
diff --git a/identity/__init__.py b/identity/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/identity/ipa/__init__.py b/identity/ipa/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/identity/ipa/ipa_group.py b/identity/ipa/ipa_group.py
new file mode 100644
index 00000000000..e34efc48daf
--- /dev/null
+++ b/identity/ipa/ipa_group.py
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipa_group
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA group
+description:
+- Add, modify and delete a group within an IPA server.
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ external:
+ description:
+ - Allow adding external non-IPA members from trusted domains.
+ required: false
+ gidnumber:
+ description:
+ - GID (use this option to set it manually).
+ required: false
+ group:
+ description:
+ - List of group names assigned to this group.
+ - If an empty list is passed all groups will be removed from this group.
+ - If option is omitted assigned groups will not be checked or changed.
+ - Groups that are already assigned but not passed will be removed.
+ nonposix:
+ description:
+ - Create as a non-POSIX group.
+ required: false
+ user:
+ description:
+ - List of user names assigned to this group.
+ - If an empty list is passed all users will be removed from this group.
+ - If option is omitted assigned users will not be checked or changed.
+ - Users that are already assigned but not passed will be removed.
+ state:
+ description:
+ - State to ensure
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+ ipa_port:
+ description: Port of IPA server
+ required: false
+ default: 443
+ ipa_host:
+ description: IP or hostname of IPA server
+ required: false
+ default: "ipa.example.com"
+ ipa_user:
+ description: Administrative account used on IPA server
+ required: false
+ default: "admin"
+ ipa_pass:
+ description: Password of administrative user
+ required: true
+ ipa_prot:
+ description: Protocol used by IPA server
+ required: false
+ default: "https"
+ choices: ["http", "https"]
+ validate_certs:
+ description:
+ - This only applies if C(ipa_prot) is I(https).
+ - If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+version_added: "2.3"
+'''
+
+EXAMPLES = '''
+# Ensure group is present
+- ipa_group:
+ name: oinstall
+ gidnumber: 54321
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+# Ensure that groups sysops and appops are assigned to ops but no other group
+- ipa_group:
+ name: ops
+ group:
+ - sysops
+ - appops
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+# Ensure that users linus and larry are assigned to the group, but no other user
+- ipa_group:
+ name: sysops
+ user:
+ - linus
+ - larry
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+# Ensure group is absent
+- ipa_group:
+ name: sysops
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = '''
+group:
+ description: Group as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.ipa import IPAClient
+
+class GroupIPAClient(IPAClient):
+
+ def __init__(self, module, host, port, protocol):
+ super(GroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def group_find(self, name):
+ return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name})
+
+ def group_add(self, name, item):
+ return self._post_json(method='group_add', name=name, item=item)
+
+ def group_mod(self, name, item):
+ return self._post_json(method='group_mod', name=name, item=item)
+
+ def group_del(self, name):
+ return self._post_json(method='group_del', name=name)
+
+ def group_add_member(self, name, item):
+ return self._post_json(method='group_add_member', name=name, item=item)
+
+ def group_add_member_group(self, name, item):
+ return self.group_add_member(name=name, item={'group': item})
+
+ def group_add_member_user(self, name, item):
+ return self.group_add_member(name=name, item={'user': item})
+
+ def group_remove_member(self, name, item):
+ return self._post_json(method='group_remove_member', name=name, item=item)
+
+ def group_remove_member_group(self, name, item):
+ return self.group_remove_member(name=name, item={'group': item})
+
+ def group_remove_member_user(self, name, item):
+ return self.group_remove_member(name=name, item={'user': item})
+
+
+def get_group_dict(description=None, external=None, gid=None, nonposix=None):
+ group = {}
+ if description is not None:
+ group['description'] = description
+ if external is not None:
+ group['external'] = external
+ if gid is not None:
+ group['gidnumber'] = gid
+ if nonposix is not None:
+ group['nonposix'] = nonposix
+ return group
+
+
+def get_group_diff(ipa_group, module_group):
+ data = []
+ # group_add accepts the attribute 'nonposix', whereas group_mod only accepts 'posix'.
+ if 'nonposix' in module_group:
+ # Only non-posix groups can be changed to posix
+ if not module_group['nonposix'] and ipa_group.get('nonposix'):
+ module_group['posix'] = True
+ del module_group['nonposix']
+
+ for key in module_group.keys():
+ module_value = module_group.get(key, None)
+ ipa_value = ipa_group.get(key, None)
+ if isinstance(ipa_value, list) and not isinstance(module_value, list):
+ module_value = [module_value]
+ if isinstance(ipa_value, list) and isinstance(module_value, list):
+ ipa_value = sorted(ipa_value)
+ module_value = sorted(module_value)
+ if ipa_value != module_value:
+ data.append(key)
+ return data
+
+
+def modify_if_diff(module, name, ipa_list, module_list, add_method, remove_method):
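+ # Reconcile membership lists: entries present in IPA but not requested are
+ # removed, entries requested but missing are added (skipped in check mode).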
+ changed = False
+ diff = list(set(ipa_list) - set(module_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ remove_method(name=name, item=diff)
+
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ add_method(name=name, item=diff)
+
+ return changed
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['name']
+ group = module.params['group']
+ user = module.params['user']
+
+ module_group = get_group_dict(description=module.params['description'], external=module.params['external'],
+ gid=module.params['gidnumber'], nonposix=module.params['nonposix'])
+ ipa_group = client.group_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_group:
+ changed = True
+ if not module.check_mode:
+ ipa_group = client.group_add(name, item=module_group)
+ else:
+ diff = get_group_diff(ipa_group, module_group)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_group.get(key)
+ client.group_mod(name=name, item=data)
+
+ if group is not None:
+ changed = modify_if_diff(module, name, ipa_group.get('member_group', []), group,
+ client.group_add_member_group,
+ client.group_remove_member_group) or changed
+
+ if user is not None:
+ changed = modify_if_diff(module, name, ipa_group.get('member_user', []), user,
+ client.group_add_member_user,
+ client.group_remove_member_user) or changed
+
+ else:
+ if ipa_group:
+ changed = True
+ if not module.check_mode:
+ client.group_del(name)
+
+ return changed, client.group_find(name=name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str', required=False),
+ external=dict(type='bool', required=False),
+ gidnumber=dict(type='str', required=False, aliases=['gid']),
+ group=dict(type='list', required=False),
+ nonposix=dict(type='bool', required=False),
+ state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
+ user=dict(type='list', required=False),
+ ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
+ ipa_host=dict(type='str', required=False, default='ipa.example.com'),
+ ipa_port=dict(type='int', required=False, default=443),
+ ipa_user=dict(type='str', required=False, default='admin'),
+ ipa_pass=dict(type='str', required=True, no_log=True),
+ validate_certs=dict(type='bool', required=False, default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ client = GroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, group = ensure(module, client)
+ module.exit_json(changed=changed, group=group)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/identity/ipa/ipa_hbacrule.py b/identity/ipa/ipa_hbacrule.py
new file mode 100644
index 00000000000..d93bc32fd45
--- /dev/null
+++ b/identity/ipa/ipa_hbacrule.py
@@ -0,0 +1,411 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipa_hbacrule
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA HBAC rule
+description:
+- Add, modify or delete an IPA HBAC rule using the IPA API.
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ description:
+ description: Description
+ required: false
+ host:
+ description:
+ - List of host names to assign.
+ - If an empty list is passed all hosts will be removed from the rule.
+ - If option is omitted hosts will not be checked or changed.
+ required: false
+ hostcategory:
+ description: Host category
+ required: false
+ choices: ['all']
+ hostgroup:
+ description:
+ - List of hostgroup names to assign.
+ - If an empty list is passed all hostgroups will be removed from the rule.
+ - If option is omitted hostgroups will not be checked or changed.
+ service:
+ description:
+ - List of service names to assign.
+ - If an empty list is passed all services will be removed from the rule.
+ - If option is omitted services will not be checked or changed.
+ servicecategory:
+ description: Service category
+ required: false
+ choices: ['all']
+ servicegroup:
+ description:
+ - List of service group names to assign.
+ - If an empty list is passed all assigned service groups will be removed from the rule.
+ - If option is omitted service groups will not be checked or changed.
+ sourcehost:
+ description:
+ - List of source host names to assign.
+ - If an empty list is passed all assigned source hosts will be removed from the rule.
+ - If option is omitted source hosts will not be checked or changed.
+ sourcehostcategory:
+ description: Source host category
+ required: false
+ choices: ['all']
+ sourcehostgroup:
+ description:
+ - List of source host group names to assign.
+ - If an empty list is passed all assigned source host groups will be removed from the rule.
+ - If option is omitted source host groups will not be checked or changed.
+ state:
+ description: State to ensure
+ required: false
+ default: "present"
+ choices: ["present", "absent", "enabled", "disabled"]
+ user:
+ description:
+ - List of user names to assign.
+ - If an empty list is passed all assigned users will be removed from the rule.
+ - If option is omitted users will not be checked or changed.
+ usercategory:
+ description: User category
+ required: false
+ choices: ['all']
+ usergroup:
+ description:
+ - List of user group names to assign.
+ - If an empty list is passed all assigned user groups will be removed from the rule.
+ - If option is omitted user groups will not be checked or changed.
+ ipa_port:
+ description: Port of IPA server
+ required: false
+ default: 443
+ ipa_host:
+ description: IP or hostname of IPA server
+ required: false
+ default: "ipa.example.com"
+ ipa_user:
+ description: Administrative account used on IPA server
+ required: false
+ default: "admin"
+ ipa_pass:
+ description: Password of administrative user
+ required: true
+ ipa_prot:
+ description: Protocol used by IPA server
+ required: false
+ default: "https"
+ choices: ["http", "https"]
+ validate_certs:
+ description:
+ - This only applies if C(ipa_prot) is I(https).
+ - If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+version_added: "2.3"
+'''
+
+EXAMPLES = '''
+# Ensure rule to allow all users to access any host from any host
+- ipa_hbacrule:
+ name: allow_all
+ description: Allow all users to access any host from any host
+ hostcategory: all
+ servicecategory: all
+ usercategory: all
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+# Ensure rule with certain limitations
+- ipa_hbacrule:
+ name: allow_all_developers_access_to_db
+ description: Allow all developers to access any database from any host
+ hostgroup:
+ - db-server
+ usergroup:
+ - developers
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+# Ensure rule is absent
+- ipa_hbacrule:
+ name: rule_to_be_deleted
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = '''
+hbacrule:
+ description: HBAC rule as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.ipa import IPAClient
+
+class HBACRuleIPAClient(IPAClient):
+
+ def __init__(self, module, host, port, protocol):
+ super(HBACRuleIPAClient, self).__init__(module, host, port, protocol)
+
+ def hbacrule_find(self, name):
+ return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name})
+
+ def hbacrule_add(self, name, item):
+ return self._post_json(method='hbacrule_add', name=name, item=item)
+
+ def hbacrule_mod(self, name, item):
+ return self._post_json(method='hbacrule_mod', name=name, item=item)
+
+ def hbacrule_del(self, name):
+ return self._post_json(method='hbacrule_del', name=name)
+
+ def hbacrule_add_host(self, name, item):
+ return self._post_json(method='hbacrule_add_host', name=name, item=item)
+
+ def hbacrule_remove_host(self, name, item):
+ return self._post_json(method='hbacrule_remove_host', name=name, item=item)
+
+ def hbacrule_add_service(self, name, item):
+ return self._post_json(method='hbacrule_add_service', name=name, item=item)
+
+ def hbacrule_remove_service(self, name, item):
+ return self._post_json(method='hbacrule_remove_service', name=name, item=item)
+
+ def hbacrule_add_user(self, name, item):
+ return self._post_json(method='hbacrule_add_user', name=name, item=item)
+
+ def hbacrule_remove_user(self, name, item):
+ return self._post_json(method='hbacrule_remove_user', name=name, item=item)
+
+ def hbacrule_add_sourcehost(self, name, item):
+ return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item)
+
+ def hbacrule_remove_sourcehost(self, name, item):
+ return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item)
+
+
+def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None,
+ sourcehostcategory=None,
+ usercategory=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ if hostcategory is not None:
+ data['hostcategory'] = hostcategory
+ if ipaenabledflag is not None:
+ data['ipaenabledflag'] = ipaenabledflag
+ if servicecategory is not None:
+ data['servicecategory'] = servicecategory
+ if sourcehostcategory is not None:
+ data['sourcehostcategory'] = sourcehostcategory
+ if usercategory is not None:
+ data['usercategory'] = usercategory
+ return data
+
+
+def get_hbcarule_diff(ipa_hbcarule, module_hbcarule):
+ data = []
+ for key in module_hbcarule.keys():
+ module_value = module_hbcarule.get(key, None)
+ ipa_value = ipa_hbcarule.get(key, None)
+ if isinstance(ipa_value, list) and not isinstance(module_value, list):
+ module_value = [module_value]
+ if isinstance(ipa_value, list) and isinstance(module_value, list):
+ ipa_value = sorted(ipa_value)
+ module_value = sorted(module_value)
+ if ipa_value != module_value:
+ data.append(key)
+ return data
+
+
+def modify_if_diff(module, name, ipa_list, module_list, add_method, remove_method, item):
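+ # Same membership reconciliation as in ipa_group, but the member type
+ # ('host', 'user', 'group', ...) is passed as the item key of the
+ # add/remove API call.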
+ changed = False
+ diff = list(set(ipa_list) - set(module_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ remove_method(name=name, item={item: diff})
+
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ add_method(name=name, item={item: diff})
+
+ return changed
+
+
+def ensure(module, client):
+ name = module.params['name']
+ state = module.params['state']
+
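+ # 'present' and 'enabled' keep the rule active; 'disabled' maps the IPA
+ # enabled flag to FALSE ('absent' also computes FALSE, but that path
+ # deletes the rule instead).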
+ if state in ['present', 'enabled']:
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = 'FALSE'
+
+ host = module.params['host']
+ hostcategory = module.params['hostcategory']
+ hostgroup = module.params['hostgroup']
+ service = module.params['service']
+ servicecategory = module.params['servicecategory']
+ servicegroup = module.params['servicegroup']
+ sourcehost = module.params['sourcehost']
+ sourcehostcategory = module.params['sourcehostcategory']
+ sourcehostgroup = module.params['sourcehostgroup']
+ user = module.params['user']
+ usercategory = module.params['usercategory']
+ usergroup = module.params['usergroup']
+
+ module_hbacrule = get_hbacrule_dict(description=module.params['description'],
+ hostcategory=hostcategory,
+ ipaenabledflag=ipaenabledflag,
+ servicecategory=servicecategory,
+ sourcehostcategory=sourcehostcategory,
+ usercategory=usercategory)
+ ipa_hbacrule = client.hbacrule_find(name=name)
+
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_hbacrule:
+ changed = True
+ if not module.check_mode:
+ ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule)
+ else:
+ diff = get_hbcarule_diff(ipa_hbacrule, module_hbacrule)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_hbacrule.get(key)
+ client.hbacrule_mod(name=name, item=data)
+
+ if host is not None:
+ changed = modify_if_diff(module, name, ipa_hbacrule.get('memberhost_host', []), host,
+ client.hbacrule_add_host,
+ client.hbacrule_remove_host, 'host') or changed
+
+ if hostgroup is not None:
+ changed = modify_if_diff(module, name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup,
+ client.hbacrule_add_host,
+ client.hbacrule_remove_host, 'hostgroup') or changed
+
+ if service is not None:
+ changed = modify_if_diff(module, name, ipa_hbacrule.get('memberservice_hbacsvc', []), service,
+ client.hbacrule_add_service,
+ client.hbacrule_remove_service, 'hbacsvc') or changed
+
+ if servicegroup is not None:
+ changed = modify_if_diff(module, name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []),
+ servicegroup,
+ client.hbacrule_add_service,
+ client.hbacrule_remove_service, 'hbacsvcgroup') or changed
+
+ if sourcehost is not None:
+ changed = modify_if_diff(module, name, ipa_hbacrule.get('sourcehost_host', []), sourcehost,
+ client.hbacrule_add_sourcehost,
+ client.hbacrule_remove_sourcehost, 'host') or changed
+
+ if sourcehostgroup is not None:
+ changed = modify_if_diff(module, name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup,
+ client.hbacrule_add_sourcehost,
+ client.hbacrule_remove_sourcehost, 'hostgroup') or changed
+
+ if user is not None:
+ changed = modify_if_diff(module, name, ipa_hbacrule.get('memberuser_user', []), user,
+ client.hbacrule_add_user,
+ client.hbacrule_remove_user, 'user') or changed
+
+ if usergroup is not None:
+ changed = modify_if_diff(module, name, ipa_hbacrule.get('memberuser_group', []), usergroup,
+ client.hbacrule_add_user,
+ client.hbacrule_remove_user, 'group') or changed
+ else:
+ if ipa_hbacrule:
+ changed = True
+ if not module.check_mode:
+ client.hbacrule_del(name=name)
+
+ return changed, client.hbacrule_find(name=name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str', required=False),
+ host=dict(type='list', required=False),
+ hostcategory=dict(type='str', required=False, choices=['all']),
+ hostgroup=dict(type='list', required=False),
+ service=dict(type='list', required=False),
+ servicecategory=dict(type='str', required=False, choices=['all']),
+ servicegroup=dict(type='list', required=False),
+ sourcehost=dict(type='list', required=False),
+ sourcehostcategory=dict(type='str', required=False, choices=['all']),
+ sourcehostgroup=dict(type='list', required=False),
+ state=dict(type='str', required=False, default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),
+ user=dict(type='list', required=False),
+ usercategory=dict(type='str', required=False, choices=['all']),
+ usergroup=dict(type='list', required=False),
+ ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
+ ipa_host=dict(type='str', required=False, default='ipa.example.com'),
+ ipa_port=dict(type='int', required=False, default=443),
+ ipa_user=dict(type='str', required=False, default='admin'),
+ ipa_pass=dict(type='str', required=True, no_log=True),
+ validate_certs=dict(type='bool', required=False, default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ client = HBACRuleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, hbacrule = ensure(module, client)
+ module.exit_json(changed=changed, hbacrule=hbacrule)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/identity/ipa/ipa_host.py b/identity/ipa/ipa_host.py
new file mode 100644
index 00000000000..17b78500bc5
--- /dev/null
+++ b/identity/ipa/ipa_host.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipa_host
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host
+description:
+- Add, modify and delete an IPA host using the IPA API.
+options:
+ fqdn:
+ description:
+ - Full qualified domain name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ description:
+ description:
+ - A description of this host.
+ required: false
+ force:
+ description:
+ - Force host name even if not in DNS.
+ required: false
+ ip_address:
+ description:
+ - Add the host to DNS with this IP address.
+ required: false
+ mac_address:
+ description:
+ - List of hardware MAC address(es) of this host.
+ - If option is omitted MAC addresses will not be checked or changed.
+ - If an empty list is passed all assigned MAC addresses will be removed.
+ - MAC addresses that are already assigned but not passed will be removed.
+ required: false
+ aliases: ["macaddress"]
+ ns_host_location:
+ description:
+ - Host location (e.g. "Lab 2")
+ required: false
+ aliases: ["nshostlocation"]
+ ns_hardware_platform:
+ description:
+ - Host hardware platform (e.g. "Lenovo T61")
+ required: false
+ aliases: ["nshardwareplatform"]
+ ns_os_version:
+ description:
+ - Host operating system and version (e.g. "Fedora 9")
+ required: false
+ aliases: ["nsosversion"]
+ user_certificate:
+ description:
+ - List of Base-64 encoded server certificates.
+ - If option is omitted certificates will not be checked or changed.
+ - If an empty list is passed all assigned certificates will be removed.
+ - Certificates already assigned but not passed will be removed.
+ required: false
+ aliases: ["usercertificate"]
+ state:
+ description: State to ensure
+ required: false
+ default: present
+ choices: ["present", "absent", "disabled"]
+ ipa_port:
+ description: Port of IPA server
+ required: false
+ default: 443
+ ipa_host:
+ description: IP or hostname of IPA server
+ required: false
+ default: ipa.example.com
+ ipa_user:
+ description: Administrative account used on IPA server
+ required: false
+ default: admin
+ ipa_pass:
+ description: Password of administrative user
+ required: true
+ ipa_prot:
+ description: Protocol used by IPA server
+ required: false
+ default: https
+ choices: ["http", "https"]
+ validate_certs:
+ description:
+ - This only applies if C(ipa_prot) is I(https).
+ - If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+version_added: "2.3"
+'''
+
+EXAMPLES = '''
+# Ensure host is present
+- ipa_host:
+ name: host01.example.com
+ description: Example host
+ ip_address: 192.168.0.123
+ ns_host_location: Lab
+ ns_os_version: CentOS 7
+ ns_hardware_platform: Lenovo T61
+ mac_address:
+ - "08:00:27:E3:B1:2D"
+ - "52:54:00:BD:97:1E"
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+# Ensure host is disabled
+- ipa_host:
+ name: host01.example.com
+ state: disabled
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+# Ensure that all user certificates are removed
+- ipa_host:
+ name: host01.example.com
+ user_certificate: []
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+# Ensure host is absent
+- ipa_host:
+ name: host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = '''
+host:
+ description: Host as returned by IPA API.
+ returned: always
+ type: dict
+host_diff:
+ description: List of options that differ and would be changed
+ returned: if check mode and a difference is found
+ type: list
+'''
+
+from ansible.module_utils.ipa import IPAClient
+
+class HostIPAClient(IPAClient):
+
+ def __init__(self, module, host, port, protocol):
+ super(HostIPAClient, self).__init__(module, host, port, protocol)
+
+ def host_find(self, name):
+ return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name})
+
+ def host_add(self, name, host):
+ return self._post_json(method='host_add', name=name, item=host)
+
+ def host_mod(self, name, host):
+ return self._post_json(method='host_mod', name=name, item=host)
+
+ def host_del(self, name):
+ return self._post_json(method='host_del', name=name)
+
+ def host_disable(self, name):
+ return self._post_json(method='host_disable', name=name)
+
+
+def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None,
+ ns_os_version=None, user_certificate=None, mac_address=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ if force is not None:
+ data['force'] = force
+ if ip_address is not None:
+ data['ip_address'] = ip_address
+ if ns_host_location is not None:
+ data['nshostlocation'] = ns_host_location
+ if ns_hardware_platform is not None:
+ data['nshardwareplatform'] = ns_hardware_platform
+ if ns_os_version is not None:
+ data['nsosversion'] = ns_os_version
+ if user_certificate is not None:
+ data['usercertificate'] = [{"__base64__": item} for item in user_certificate]
+ if mac_address is not None:
+ data['macaddress'] = mac_address
+ return data
+
+
+def get_host_diff(ipa_host, module_host):
+ non_updateable_keys = ['force', 'ip_address']
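+ # 'force' and 'ip_address' are only valid when adding a host, so drop them
+ # before diffing to avoid reporting them as pending modifications.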
+ data = []
+ for key in non_updateable_keys:
+ if key in module_host:
+ del module_host[key]
+ for key in module_host.keys():
+ ipa_value = ipa_host.get(key, None)
+ module_value = module_host.get(key, None)
+ if isinstance(ipa_value, list) and not isinstance(module_value, list):
+ module_value = [module_value]
+ if isinstance(ipa_value, list) and isinstance(module_value, list):
+ ipa_value = sorted(ipa_value)
+ module_value = sorted(module_value)
+ if ipa_value != module_value:
+ data.append(key)
+ return data
+
+
+def ensure(module, client):
+ name = module.params['name']
+ state = module.params['state']
+
+ ipa_host = client.host_find(name=name)
+ module_host = get_host_dict(description=module.params['description'],
+ force=module.params['force'], ip_address=module.params['ip_address'],
+ ns_host_location=module.params['ns_host_location'],
+ ns_hardware_platform=module.params['ns_hardware_platform'],
+ ns_os_version=module.params['ns_os_version'],
+ user_certificate=module.params['user_certificate'],
+ mac_address=module.params['mac_address'])
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_host:
+ changed = True
+ if not module.check_mode:
+ client.host_add(name=name, host=module_host)
+ else:
+ diff = get_host_diff(ipa_host, module_host)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_host.get(key)
+ client.host_mod(name=name, host=data)
+
+ else:
+ if ipa_host:
+ changed = True
+ if not module.check_mode:
+ client.host_del(name=name)
+
+ return changed, client.host_find(name=name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ description=dict(type='str', required=False),
+ fqdn=dict(type='str', required=True, aliases=['name']),
+ force=dict(type='bool', required=False),
+ ip_address=dict(type='str', required=False),
+ ns_host_location=dict(type='str', required=False, aliases=['nshostlocation']),
+ ns_hardware_platform=dict(type='str', required=False, aliases=['nshardwareplatform']),
+ ns_os_version=dict(type='str', required=False, aliases=['nsosversion']),
+ user_certificate=dict(type='list', required=False, aliases=['usercertificate']),
+ mac_address=dict(type='list', required=False, aliases=['macaddress']),
+ state=dict(type='str', required=False, default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),
+ ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
+ ipa_host=dict(type='str', required=False, default='ipa.example.com'),
+ ipa_port=dict(type='int', required=False, default=443),
+ ipa_user=dict(type='str', required=False, default='admin'),
+ ipa_pass=dict(type='str', required=True, no_log=True),
+ validate_certs=dict(type='bool', required=False, default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ client = HostIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, host = ensure(module, client)
+ module.exit_json(changed=changed, host=host)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/identity/ipa/ipa_hostgroup.py b/identity/ipa/ipa_hostgroup.py
new file mode 100644
index 00000000000..57fbc5b4531
--- /dev/null
+++ b/identity/ipa/ipa_hostgroup.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipa_hostgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host-group
+description:
+- Add, modify and delete an IPA host-group using the IPA API.
+options:
+ cn:
+ description:
+ - Name of host-group.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ description:
+ description:
+ - Description
+ required: false
+ host:
+ description:
+ - List of hosts that belong to the host-group.
+ - If an empty list is passed all hosts will be removed from the group.
+ - If option is omitted hosts will not be checked or changed.
+ - If option is passed all assigned hosts that are not passed will be unassigned from the group.
+ required: false
+ hostgroup:
+ description:
+ - List of host-groups that belong to this host-group.
+ - If an empty list is passed all host-groups will be removed from the group.
+ - If option is omitted host-groups will not be checked or changed.
+ - If option is passed all assigned hostgroups that are not passed will be unassigned from the group.
+ required: false
+ state:
+ description:
+ - State to ensure.
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+ ipa_port:
+ description: Port of IPA server
+ required: false
+ default: 443
+ ipa_host:
+ description: IP or hostname of IPA server
+ required: false
+ default: "ipa.example.com"
+ ipa_user:
+ description: Administrative account used on IPA server
+ required: false
+ default: "admin"
+ ipa_pass:
+ description: Password of administrative user
+ required: true
+ ipa_prot:
+ description: Protocol used by IPA server
+ required: false
+ default: "https"
+ choices: ["http", "https"]
+ validate_certs:
+ description:
+ - This only applies if C(ipa_prot) is I(https).
+ - If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+version_added: "2.3"
+'''
+
+EXAMPLES = '''
+# Ensure host-group databases is present
+- ipa_hostgroup:
+ name: databases
+ state: present
+ host:
+ - db.example.com
+ hostgroup:
+ - mysql-server
+ - oracle-server
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+# Ensure host-group databases is absent
+- ipa_hostgroup:
+ name: databases
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = '''
+hostgroup:
+ description: Hostgroup as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.ipa import IPAClient
+
+class HostGroupIPAClient(IPAClient):
+
+ def __init__(self, module, host, port, protocol):
+ super(HostGroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def hostgroup_find(self, name):
+ return self._post_json(method='hostgroup_find', name=None, item={'all': True, 'cn': name})
+
+ def hostgroup_add(self, name, item):
+ return self._post_json(method='hostgroup_add', name=name, item=item)
+
+ def hostgroup_mod(self, name, item):
+ return self._post_json(method='hostgroup_mod', name=name, item=item)
+
+ def hostgroup_del(self, name):
+ return self._post_json(method='hostgroup_del', name=name)
+
+ def hostgroup_add_member(self, name, item):
+ return self._post_json(method='hostgroup_add_member', name=name, item=item)
+
+ def hostgroup_add_host(self, name, item):
+ return self.hostgroup_add_member(name=name, item={'host': item})
+
+ def hostgroup_add_hostgroup(self, name, item):
+ return self.hostgroup_add_member(name=name, item={'hostgroup': item})
+
+ def hostgroup_remove_member(self, name, item):
+ return self._post_json(method='hostgroup_remove_member', name=name, item=item)
+
+ def hostgroup_remove_host(self, name, item):
+ return self.hostgroup_remove_member(name=name, item={'host': item})
+
+ def hostgroup_remove_hostgroup(self, name, item):
+ return self.hostgroup_remove_member(name=name, item={'hostgroup': item})
+
+
+def get_hostgroup_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_hostgroup_diff(ipa_hostgroup, module_hostgroup):
+ data = []
+ for key in module_hostgroup.keys():
+ ipa_value = ipa_hostgroup.get(key, None)
+ module_value = module_hostgroup.get(key, None)
+ if isinstance(ipa_value, list) and not isinstance(module_value, list):
+ module_value = [module_value]
+ if isinstance(ipa_value, list) and isinstance(module_value, list):
+ ipa_value = sorted(ipa_value)
+ module_value = sorted(module_value)
+ if ipa_value != module_value:
+ data.append(key)
+ return data
+
+
+def modify_if_diff(module, name, ipa_list, module_list, add_method, remove_method):
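+ # Membership reconciliation as in ipa_group: remove unrequested members,
+ # then add missing ones; both steps are skipped in check mode.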
+ changed = False
+ diff = list(set(ipa_list) - set(module_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ remove_method(name=name, item=diff)
+
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ add_method(name=name, item=diff)
+ return changed
+
+
+def ensure(module, client):
+ name = module.params['name']
+ state = module.params['state']
+ host = module.params['host']
+ hostgroup = module.params['hostgroup']
+
+ ipa_hostgroup = client.hostgroup_find(name=name)
+ module_hostgroup = get_hostgroup_dict(description=module.params['description'])
+
+ changed = False
+ if state == 'present':
+ if not ipa_hostgroup:
+ changed = True
+ if not module.check_mode:
+ ipa_hostgroup = client.hostgroup_add(name=name, item=module_hostgroup)
+ else:
+ diff = get_hostgroup_diff(ipa_hostgroup, module_hostgroup)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_hostgroup.get(key)
+ client.hostgroup_mod(name=name, item=data)
+
+ if host is not None:
+ changed = modify_if_diff(module, name, ipa_hostgroup.get('member_host', []),
+ [item.lower() for item in host],
+ client.hostgroup_add_host, client.hostgroup_remove_host) or changed
+
+ if hostgroup is not None:
+ changed = modify_if_diff(module, name, ipa_hostgroup.get('member_hostgroup', []),
+ [item.lower() for item in hostgroup],
+ client.hostgroup_add_hostgroup, client.hostgroup_remove_hostgroup) or changed
+
+ else:
+ if ipa_hostgroup:
+ changed = True
+ if not module.check_mode:
+ client.hostgroup_del(name=name)
+
+ return changed, client.hostgroup_find(name=name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str', required=False),
+ host=dict(type='list', required=False),
+ hostgroup=dict(type='list', required=False),
+ state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
+ ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
+ ipa_host=dict(type='str', required=False, default='ipa.example.com'),
+ ipa_port=dict(type='int', required=False, default=443),
+ ipa_user=dict(type='str', required=False, default='admin'),
+ ipa_pass=dict(type='str', required=True, no_log=True),
+ validate_certs=dict(type='bool', required=False, default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ client = HostGroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, hostgroup = ensure(module, client)
+ module.exit_json(changed=changed, hostgroup=hostgroup)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/identity/ipa/ipa_role.py b/identity/ipa/ipa_role.py
new file mode 100644
index 00000000000..95cd2bc45ed
--- /dev/null
+++ b/identity/ipa/ipa_role.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipa_role
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA role
+description:
+- Add, modify and delete a role within FreeIPA server using FreeIPA API
+options:
+ cn:
+ description:
+ - Role name.
+ - Cannot be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ description:
+ description:
+ - A description of this role-group.
+ required: false
+ group:
+ description:
+ - List of group names to assign to this role.
+ - If an empty list is passed all assigned groups will be unassigned from the role.
+ - If option is omitted groups will not be checked or changed.
+ - If option is passed all assigned groups that are not passed will be unassigned from the role.
+ host:
+ description:
+ - List of host names to assign.
+ - If an empty list is passed all assigned hosts will be unassigned from the role.
+ - If option is omitted hosts will not be checked or changed.
+ - If option is passed all assigned hosts that are not passed will be unassigned from the role.
+ required: false
+ hostgroup:
+ description:
+ - List of host group names to assign.
+ - If an empty list is passed all assigned host groups will be removed from the role.
+ - If option is omitted host groups will not be checked or changed.
+ - If option is passed all assigned hostgroups that are not passed will be unassigned from the role.
+ required: false
+ service:
+ description:
+ - List of service names to assign.
+ - If an empty list is passed all assigned services will be removed from the role.
+ - If option is omitted services will not be checked or changed.
+ - If option is passed all assigned services that are not passed will be removed from the role.
+ required: false
+ state:
+ description: State to ensure
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+ user:
+ description:
+ - List of user names to assign.
+ - If an empty list is passed all assigned users will be removed from the role.
+ - If option is omitted users will not be checked or changed.
+ required: false
+ ipa_port:
+ description: Port of IPA server
+ required: false
+ default: 443
+ ipa_host:
+ description: IP or hostname of IPA server
+ required: false
+ default: "ipa.example.com"
+ ipa_user:
+ description: Administrative account used on IPA server
+ required: false
+ default: "admin"
+ ipa_pass:
+ description: Password of administrative user
+ required: true
+ ipa_prot:
+ description: Protocol used by IPA server
+ required: false
+ default: "https"
+ choices: ["http", "https"]
+ validate_certs:
+ description:
+ - This only applies if C(ipa_prot) is I(https).
+ - If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+version_added: "2.3"
+'''
+
+EXAMPLES = '''
+# Ensure role is present
+- ipa_role:
+ name: dba
+ description: Database Administrators
+ state: present
+ user:
+ - pinky
+ - brain
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+# Ensure role with certain details
+- ipa_role:
+ name: another-role
+ description: Just another role
+ group:
+ - editors
+ host:
+ - host01.example.com
+ hostgroup:
+ - hostgroup01
+ service:
+ - service01
+
+# Ensure role is absent
+- ipa_role:
+ name: dba
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
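+
+# As described in the option docs above, passing empty lists unassigns
+# every member of the given types from the role
+- ipa_role:
+ name: dba
+ user: []
+ group: []
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret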
+'''
+
+RETURN = '''
+role:
+ description: Role as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.ipa import IPAClient
+
+class RoleIPAClient(IPAClient):
+
+ def __init__(self, module, host, port, protocol):
+ super(RoleIPAClient, self).__init__(module, host, port, protocol)
+
+ def role_find(self, name):
+ return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name})
+
+ def role_add(self, name, item):
+ return self._post_json(method='role_add', name=name, item=item)
+
+ def role_mod(self, name, item):
+ return self._post_json(method='role_mod', name=name, item=item)
+
+ def role_del(self, name):
+ return self._post_json(method='role_del', name=name)
+
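+ # The IPA API manages all member types through a single add/remove member
+ # call; the member type is selected by the key of the item dict, as the
+ # thin wrappers below show.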
+ def role_add_member(self, name, item):
+ return self._post_json(method='role_add_member', name=name, item=item)
+
+ def role_add_group(self, name, item):
+ return self.role_add_member(name=name, item={'group': item})
+
+ def role_add_host(self, name, item):
+ return self.role_add_member(name=name, item={'host': item})
+
+ def role_add_hostgroup(self, name, item):
+ return self.role_add_member(name=name, item={'hostgroup': item})
+
+ def role_add_service(self, name, item):
+ return self.role_add_member(name=name, item={'service': item})
+
+ def role_add_user(self, name, item):
+ return self.role_add_member(name=name, item={'user': item})
+
+ def role_remove_member(self, name, item):
+ return self._post_json(method='role_remove_member', name=name, item=item)
+
+ def role_remove_group(self, name, item):
+ return self.role_remove_member(name=name, item={'group': item})
+
+ def role_remove_host(self, name, item):
+ return self.role_remove_member(name=name, item={'host': item})
+
+ def role_remove_hostgroup(self, name, item):
+ return self.role_remove_member(name=name, item={'hostgroup': item})
+
+ def role_remove_service(self, name, item):
+ return self.role_remove_member(name=name, item={'service': item})
+
+ def role_remove_user(self, name, item):
+ return self.role_remove_member(name=name, item={'user': item})
+
+
+def get_role_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_role_diff(ipa_role, module_role):
+ data = []
+ for key in module_role.keys():
+ module_value = module_role.get(key, None)
+ ipa_value = ipa_role.get(key, None)
+ if isinstance(ipa_value, list) and not isinstance(module_value, list):
+ module_value = [module_value]
+ if isinstance(ipa_value, list) and isinstance(module_value, list):
+ ipa_value = sorted(ipa_value)
+ module_value = sorted(module_value)
+ if ipa_value != module_value:
+ data.append(key)
+ return data
+
+
+def modify_if_diff(module, name, ipa_list, module_list, add_method, remove_method):
+ changed = False
+ diff = list(set(ipa_list) - set(module_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ remove_method(name=name, item=diff)
+
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ add_method(name=name, item=diff)
+ return changed
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['name']
+ group = module.params['group']
+ host = module.params['host']
+ hostgroup = module.params['hostgroup']
+ service = module.params['service']
+ user = module.params['user']
+
+ module_role = get_role_dict(description=module.params['description'])
+ ipa_role = client.role_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_role:
+ changed = True
+ if not module.check_mode:
+ ipa_role = client.role_add(name=name, item=module_role)
+ else:
+ diff = get_role_diff(ipa_role=ipa_role, module_role=module_role)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_role.get(key)
+ client.role_mod(name=name, item=data)
+
+ if group is not None:
+ changed = modify_if_diff(module, name, ipa_role.get('member_group', []), group,
+ client.role_add_group,
+ client.role_remove_group) or changed
+
+ if host is not None:
+ changed = modify_if_diff(module, name, ipa_role.get('member_host', []), host,
+ client.role_add_host,
+ client.role_remove_host) or changed
+
+ if hostgroup is not None:
+ changed = modify_if_diff(module, name, ipa_role.get('member_hostgroup', []), hostgroup,
+ client.role_add_hostgroup,
+ client.role_remove_hostgroup) or changed
+
+ if service is not None:
+ changed = modify_if_diff(module, name, ipa_role.get('member_service', []), service,
+ client.role_add_service,
+ client.role_remove_service) or changed
+ if user is not None:
+ changed = modify_if_diff(module, name, ipa_role.get('member_user', []), user,
+ client.role_add_user,
+ client.role_remove_user) or changed
+ else:
+ if ipa_role:
+ changed = True
+ if not module.check_mode:
+ client.role_del(name)
+
+ return changed, client.role_find(name=name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str', required=False),
+ group=dict(type='list', required=False),
+ host=dict(type='list', required=False),
+ hostgroup=dict(type='list', required=False),
+ service=dict(type='list', required=False),
+ state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
+ user=dict(type='list', required=False),
+ ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
+ ipa_host=dict(type='str', required=False, default='ipa.example.com'),
+ ipa_port=dict(type='int', required=False, default=443),
+ ipa_user=dict(type='str', required=False, default='admin'),
+ ipa_pass=dict(type='str', required=True, no_log=True),
+ validate_certs=dict(type='bool', required=False, default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ client = RoleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, role = ensure(module, client)
+ module.exit_json(changed=changed, role=role)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/identity/ipa/ipa_sudocmd.py b/identity/ipa/ipa_sudocmd.py
new file mode 100644
index 00000000000..6ec3c84bb1d
--- /dev/null
+++ b/identity/ipa/ipa_sudocmd.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipa_sudocmd
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo command
+description:
+- Add, modify or delete sudo command within FreeIPA server using FreeIPA API.
+options:
+ sudocmd:
+ description:
+ - Sudo Command.
+ aliases: ['name']
+ required: true
+ description:
+ description:
+ - A description of this command.
+ required: false
+ state:
+ description: State to ensure
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ ipa_port:
+ description: Port of IPA server
+ required: false
+ default: 443
+ ipa_host:
+ description: IP or hostname of IPA server
+ required: false
+ default: "ipa.example.com"
+ ipa_user:
+ description: Administrative account used on IPA server
+ required: false
+ default: "admin"
+ ipa_pass:
+ description: Password of administrative user
+ required: true
+ ipa_prot:
+ description: Protocol used by IPA server
+ required: false
+ default: "https"
+ choices: ["http", "https"]
+ validate_certs:
+ description:
+ - This only applies if C(ipa_prot) is I(https).
+ - If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+version_added: "2.3"
+'''
+
+EXAMPLES = '''
+# Ensure sudo command exists
+- ipa_sudocmd:
+ name: su
+ description: Allow to run su via sudo
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+# Ensure sudo command does not exist
+- ipa_sudocmd:
+ name: su
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = '''
+sudocmd:
+ description: Sudo command as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.ipa import IPAClient
+
+class SudoCmdIPAClient(IPAClient):
+
+ def __init__(self, module, host, port, protocol):
+ super(SudoCmdIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudocmd_find(self, name):
+ return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name})
+
+ def sudocmd_add(self, name, item):
+ return self._post_json(method='sudocmd_add', name=name, item=item)
+
+ def sudocmd_mod(self, name, item):
+ return self._post_json(method='sudocmd_mod', name=name, item=item)
+
+ def sudocmd_del(self, name):
+ return self._post_json(method='sudocmd_del', name=name)
+
+
+def get_sudocmd_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_sudocmd_diff(ipa_sudocmd, module_sudocmd):
+ data = []
+ for key in module_sudocmd.keys():
+ module_value = module_sudocmd.get(key, None)
+ ipa_value = ipa_sudocmd.get(key, None)
+ if isinstance(ipa_value, list) and not isinstance(module_value, list):
+ module_value = [module_value]
+ if isinstance(ipa_value, list) and isinstance(module_value, list):
+ ipa_value = sorted(ipa_value)
+ module_value = sorted(module_value)
+ if ipa_value != module_value:
+ data.append(key)
+ return data
+
+
+def ensure(module, client):
+ name = module.params['sudocmd']
+ state = module.params['state']
+
+ module_sudocmd = get_sudocmd_dict(description=module.params['description'])
+ ipa_sudocmd = client.sudocmd_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_sudocmd:
+ changed = True
+ if not module.check_mode:
+ client.sudocmd_add(name=name, item=module_sudocmd)
+ else:
+ diff = get_sudocmd_diff(ipa_sudocmd, module_sudocmd)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_sudocmd.get(key)
+ client.sudocmd_mod(name=name, item=data)
+ else:
+ if ipa_sudocmd:
+ changed = True
+ if not module.check_mode:
+ client.sudocmd_del(name=name)
+
+ return changed, client.sudocmd_find(name=name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ description=dict(type='str', required=False),
+ state=dict(type='str', required=False, default='present',
+ choices=['present', 'absent']),
+ sudocmd=dict(type='str', required=True, aliases=['name']),
+ ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
+ ipa_host=dict(type='str', required=False, default='ipa.example.com'),
+ ipa_port=dict(type='int', required=False, default=443),
+ ipa_user=dict(type='str', required=False, default='admin'),
+ ipa_pass=dict(type='str', required=True, no_log=True),
+ validate_certs=dict(type='bool', required=False, default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ client = SudoCmdIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudocmd = ensure(module, client)
+ module.exit_json(changed=changed, sudocmd=sudocmd)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/identity/ipa/ipa_sudocmdgroup.py b/identity/ipa/ipa_sudocmdgroup.py
new file mode 100644
index 00000000000..e1d0e9b6021
--- /dev/null
+++ b/identity/ipa/ipa_sudocmdgroup.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipa_sudocmdgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo command group
+description:
+- Add, modify or delete sudo command group within IPA server using IPA API.
+options:
+ cn:
+ description:
+ - Sudo Command Group.
+ aliases: ['name']
+ required: true
+ description:
+ description:
+ - Group description.
+ state:
+ description: State to ensure
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ sudocmd:
+ description:
+ - List of sudo commands to assign to the group.
+ - If an empty list is passed all assigned commands will be removed from the group.
+ - If option is omitted sudo commands will not be checked or changed.
+ required: false
+ ipa_port:
+ description: Port of IPA server
+ required: false
+ default: 443
+ ipa_host:
+ description: IP or hostname of IPA server
+ required: false
+ default: "ipa.example.com"
+ ipa_user:
+ description: Administrative account used on IPA server
+ required: false
+ default: "admin"
+ ipa_pass:
+ description: Password of administrative user
+ required: true
+ ipa_prot:
+ description: Protocol used by IPA server
+ required: false
+ default: "https"
+ choices: ["http", "https"]
+ validate_certs:
+ description:
+ - This only applies if C(ipa_prot) is I(https).
+ - If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+version_added: "2.3"
+'''
+
+EXAMPLES = '''
+- name: Ensure sudo command group exists
+ ipa_sudocmdgroup:
+ name: group01
+ description: Group of important commands
+ sudocmd:
+ - su
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure sudo command group does not exist
+ ipa_sudocmdgroup:
+ name: group01
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
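+
+# As noted in the sudocmd option docs, an empty list removes all assigned commands
+- name: Ensure sudo command group has no members
+ ipa_sudocmdgroup:
+ name: group01
+ sudocmd: []
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret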
+'''
+
+RETURN = '''
+sudocmdgroup:
+ description: Sudo command group as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.ipa import IPAClient
+
+class SudoCmdGroupIPAClient(IPAClient):
+
+ def __init__(self, module, host, port, protocol):
+ super(SudoCmdGroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudocmdgroup_find(self, name):
+ return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name})
+
+ def sudocmdgroup_add(self, name, item):
+ return self._post_json(method='sudocmdgroup_add', name=name, item=item)
+
+ def sudocmdgroup_mod(self, name, item):
+ return self._post_json(method='sudocmdgroup_mod', name=name, item=item)
+
+ def sudocmdgroup_del(self, name):
+ return self._post_json(method='sudocmdgroup_del', name=name)
+
+ def sudocmdgroup_add_member(self, name, item):
+ return self._post_json(method='sudocmdgroup_add_member', name=name, item=item)
+
+ def sudocmdgroup_add_member_sudocmd(self, name, item):
+ return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item})
+
+ def sudocmdgroup_remove_member(self, name, item):
+ return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item)
+
+ def sudocmdgroup_remove_member_sudocmd(self, name, item):
+ return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item})
+
+
+def get_sudocmdgroup_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def modify_if_diff(module, name, ipa_list, module_list, add_method, remove_method):
+ changed = False
+ diff = list(set(ipa_list) - set(module_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ remove_method(name=name, item=diff)
+
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ add_method(name=name, item=diff)
+ return changed
+
+
+def get_sudocmdgroup_diff(ipa_sudocmdgroup, module_sudocmdgroup):
+ data = []
+ for key in module_sudocmdgroup.keys():
+ module_value = module_sudocmdgroup.get(key, None)
+ ipa_value = ipa_sudocmdgroup.get(key, None)
+ if isinstance(ipa_value, list) and not isinstance(module_value, list):
+ module_value = [module_value]
+ if isinstance(ipa_value, list) and isinstance(module_value, list):
+ ipa_value = sorted(ipa_value)
+ module_value = sorted(module_value)
+ if ipa_value != module_value:
+ data.append(key)
+ return data
+
+
+def ensure(module, client):
+ name = module.params['name']
+ state = module.params['state']
+ sudocmd = module.params['sudocmd']
+
+ module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description'])
+ ipa_sudocmdgroup = client.sudocmdgroup_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_sudocmdgroup:
+ changed = True
+ if not module.check_mode:
+ ipa_sudocmdgroup = client.sudocmdgroup_add(name=name, item=module_sudocmdgroup)
+ else:
+ diff = get_sudocmdgroup_diff(ipa_sudocmdgroup, module_sudocmdgroup)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_sudocmdgroup.get(key)
+ client.sudocmdgroup_mod(name=name, item=data)
+
+ if sudocmd is not None:
+ changed = modify_if_diff(module, name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd,
+ client.sudocmdgroup_add_member_sudocmd,
+ client.sudocmdgroup_remove_member_sudocmd) or changed
+ else:
+ if ipa_sudocmdgroup:
+ changed = True
+ if not module.check_mode:
+ client.sudocmdgroup_del(name=name)
+
+ return changed, client.sudocmdgroup_find(name=name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str', required=False),
+ state=dict(type='str', required=False, default='present',
+ choices=['present', 'absent']),
+ sudocmd=dict(type='list', required=False),
+ ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
+ ipa_host=dict(type='str', required=False, default='ipa.example.com'),
+ ipa_port=dict(type='int', required=False, default=443),
+ ipa_user=dict(type='str', required=False, default='admin'),
+ ipa_pass=dict(type='str', required=True, no_log=True),
+ validate_certs=dict(type='bool', required=False, default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ client = SudoCmdGroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudocmdgroup = ensure(module, client)
+ module.exit_json(changed=changed, sudocmdgroup=sudocmdgroup)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/identity/ipa/ipa_sudorule.py b/identity/ipa/ipa_sudorule.py
new file mode 100644
index 00000000000..f5da15a7046
--- /dev/null
+++ b/identity/ipa/ipa_sudorule.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipa_sudorule
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo rule
+description:
+- Add, modify or delete sudo rule within IPA server using IPA API.
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Cannot be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ cmdcategory:
+ description:
+ - Command category the rule applies to.
+ choices: ['all']
+ required: false
+ cmd:
+ description:
+ - List of commands assigned to the rule.
+ - If an empty list is passed all commands will be removed from the rule.
+ - If option is omitted commands will not be checked or changed.
+ required: false
+ host:
+ description:
+ - List of hosts assigned to the rule.
+ - If an empty list is passed all hosts will be removed from the rule.
+ - If option is omitted hosts will not be checked or changed.
+ - Option C(hostcategory) must be omitted to assign hosts.
+ required: false
+ hostcategory:
+ description:
+ - Host category the rule applies to.
+ - If 'all' is passed one must omit C(host) and C(hostgroup).
+ - Option C(host) and C(hostgroup) must be omitted to assign 'all'.
+ choices: ['all']
+ required: false
+ hostgroup:
+ description:
+ - List of host groups assigned to the rule.
+ - If an empty list is passed all host groups will be removed from the rule.
+ - If option is omitted host groups will not be checked or changed.
+ - Option C(hostcategory) must be omitted to assign host groups.
+ required: false
+ user:
+ description:
+ - List of users assigned to the rule.
+ - If an empty list is passed all users will be removed from the rule.
+ - If option is omitted users will not be checked or changed.
+ required: false
+ usercategory:
+ description:
+ - User category the rule applies to.
+ choices: ['all']
+ required: false
+ usergroup:
+ description:
+ - List of user groups assigned to the rule.
+ - If an empty list is passed all user groups will be removed from the rule.
+ - If option is omitted user groups will not be checked or changed.
+ required: false
+ state:
+ description: State to ensure
+ required: false
+ default: present
+ choices: ['present', 'absent', 'enabled', 'disabled']
+ ipa_port:
+ description: Port of IPA server
+ required: false
+ default: 443
+ ipa_host:
+ description: IP or hostname of IPA server
+ required: false
+ default: "ipa.example.com"
+ ipa_user:
+ description: Administrative account used on IPA server
+ required: false
+ default: "admin"
+ ipa_pass:
+ description: Password of administrative user
+ required: true
+ ipa_prot:
+ description: Protocol used by IPA server
+ required: false
+ default: "https"
+ choices: ["http", "https"]
+ validate_certs:
+ description:
+ - This only applies if C(ipa_prot) is I(https).
+ - If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+version_added: "2.3"
+'''
+
+EXAMPLES = '''
+# Ensure a sudo rule is present that allows everybody to execute any command on any host without being asked for a password.
+- ipa_sudorule:
+ name: sudo_all_nopasswd
+ cmdcategory: all
+ description: Allow to run every command with sudo without password
+ hostcategory: all
+ sudoopt:
+ - '!authenticate'
+ usercategory: all
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+# Ensure user group developers can run every command on host group db-server as well as on host db01.example.com.
+- ipa_sudorule:
+ name: sudo_dev_dbserver
+ description: Allow developers to run every command with sudo on all database servers
+ cmdcategory: all
+ host:
+ - db01.example.com
+ hostgroup:
+ - db-server
+ sudoopt:
+ - '!authenticate'
+ usergroup:
+ - developers
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
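+# A rule can be kept in place but deactivated via state=disabled
+- ipa_sudorule:
+ name: sudo_all_nopasswd
+ state: disabled
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret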
+'''
+
+RETURN = '''
+sudorule:
+ description: Sudorule as returned by IPA
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.ipa import IPAClient
+
+class SudoRuleIPAClient(IPAClient):
+
+ def __init__(self, module, host, port, protocol):
+ super(SudoRuleIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudorule_find(self, name):
+ return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name})
+
+ def sudorule_add(self, name, item):
+ return self._post_json(method='sudorule_add', name=name, item=item)
+
+ def sudorule_mod(self, name, item):
+ return self._post_json(method='sudorule_mod', name=name, item=item)
+
+ def sudorule_del(self, name):
+ return self._post_json(method='sudorule_del', name=name)
+
+ def sudorule_add_option(self, name, item):
+ return self._post_json(method='sudorule_add_option', name=name, item=item)
+
+ def sudorule_add_option_ipasudoopt(self, name, item):
+ return self.sudorule_add_option(name=name, item={'ipasudoopt': item})
+
+ def sudorule_remove_option(self, name, item):
+ return self._post_json(method='sudorule_remove_option', name=name, item=item)
+
+ def sudorule_remove_option_ipasudoopt(self, name, item):
+ return self.sudorule_remove_option(name=name, item={'ipasudoopt': item})
+
+ def sudorule_add_host(self, name, item):
+ return self._post_json(method='sudorule_add_host', name=name, item=item)
+
+ def sudorule_add_host_host(self, name, item):
+ return self.sudorule_add_host(name=name, item={'host': item})
+
+ def sudorule_add_host_hostgroup(self, name, item):
+ return self.sudorule_add_host(name=name, item={'hostgroup': item})
+
+ def sudorule_remove_host(self, name, item):
+ return self._post_json(method='sudorule_remove_host', name=name, item=item)
+
+ def sudorule_remove_host_host(self, name, item):
+ return self.sudorule_remove_host(name=name, item={'host': item})
+
+ def sudorule_remove_host_hostgroup(self, name, item):
+ return self.sudorule_remove_host(name=name, item={'hostgroup': item})
+
+ def sudorule_add_allow_command(self, name, item):
+ return self._post_json(method='sudorule_add_allow_command', name=name, item=item)
+
+ def sudorule_remove_allow_command(self, name, item):
+ return self._post_json(method='sudorule_remove_allow_command', name=name, item=item)
+
+ def sudorule_add_user(self, name, item):
+ return self._post_json(method='sudorule_add_user', name=name, item=item)
+
+ def sudorule_add_user_user(self, name, item):
+ return self.sudorule_add_user(name=name, item={'user': item})
+
+ def sudorule_add_user_group(self, name, item):
+ return self.sudorule_add_user(name=name, item={'group': item})
+
+ def sudorule_remove_user(self, name, item):
+ return self._post_json(method='sudorule_remove_user', name=name, item=item)
+
+ def sudorule_remove_user_user(self, name, item):
+ return self.sudorule_remove_user(name=name, item={'user': item})
+
+ def sudorule_remove_user_group(self, name, item):
+ return self.sudorule_remove_user(name=name, item={'group': item})
+
+
+def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None):
+ data = {}
+ if cmdcategory is not None:
+ data['cmdcategory'] = cmdcategory
+ if description is not None:
+ data['description'] = description
+ if hostcategory is not None:
+ data['hostcategory'] = hostcategory
+ if ipaenabledflag is not None:
+ data['ipaenabledflag'] = ipaenabledflag
+ if usercategory is not None:
+ data['usercategory'] = usercategory
+ return data
+
+
+def get_sudorule_diff(ipa_sudorule, module_sudorule):
+ data = []
+ for key in module_sudorule.keys():
+ module_value = module_sudorule.get(key, None)
+ ipa_value = ipa_sudorule.get(key, None)
+ if isinstance(ipa_value, list) and not isinstance(module_value, list):
+ module_value = [module_value]
+ if isinstance(ipa_value, list) and isinstance(module_value, list):
+ ipa_value = sorted(ipa_value)
+ module_value = sorted(module_value)
+ if ipa_value != module_value:
+ data.append(key)
+ return data
+
+
+def modify_if_diff(module, name, ipa_list, module_list, add_method, remove_method):
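+ # Unlike the helper of the same name in the other ipa_* modules, this
+ # variant invokes add_method/remove_method once per entry, since the
+ # sudorule member and option calls are issued item by item here.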
+ changed = False
+ diff = list(set(ipa_list) - set(module_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ remove_method(name=name, item=item)
+
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ add_method(name=name, item=item)
+
+ return changed
+
+
+def category_changed(module, client, category_name, ipa_sudorule):
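+ """If the rule currently has the given category set to 'all', clear it and
+ report a change; per the option docs above, a category of 'all' cannot be
+ combined with explicit member lists."""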
+ if ipa_sudorule.get(category_name, None) == ['all']:
+ if not module.check_mode:
+ # cn is returned as list even with only a single value.
+ client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None})
+ return True
+ return False
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['name']
+ cmd = module.params['cmd']
+ cmdcategory = module.params['cmdcategory']
+ host = module.params['host']
+ hostcategory = module.params['hostcategory']
+ hostgroup = module.params['hostgroup']
+
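+ # present and enabled both keep the rule active; any other state
+ # (disabled, and absent for completeness) maps to an inactive flag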
+ if state in ['present', 'enabled']:
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = 'FALSE'
+
+ sudoopt = module.params['sudoopt']
+ user = module.params['user']
+ usercategory = module.params['usercategory']
+ usergroup = module.params['usergroup']
+
+ module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory,
+ description=module.params['description'],
+ hostcategory=hostcategory,
+ ipaenabledflag=ipaenabledflag,
+ usercategory=usercategory)
+ ipa_sudorule = client.sudorule_find(name=name)
+
+ changed = False
+ if state in ['present', 'disabled', 'enabled']:
+ if not ipa_sudorule:
+ changed = True
+ if not module.check_mode:
+ ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule)
+ else:
+ diff = get_sudorule_diff(ipa_sudorule, module_sudorule)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ if 'hostcategory' in diff:
+ if ipa_sudorule.get('memberhost_host', None) is not None:
+ client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host'))
+ if ipa_sudorule.get('memberhost_hostgroup', None) is not None:
+ client.sudorule_remove_host_hostgroup(name=name,
+ item=ipa_sudorule.get('memberhost_hostgroup'))
+
+ client.sudorule_mod(name=name, item=module_sudorule)
+
+ if cmd is not None:
+ changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed
+ if not module.check_mode:
+ client.sudorule_add_allow_command(name=name, item=cmd)
+
+ if host is not None:
+ changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
+ changed = modify_if_diff(module, name, ipa_sudorule.get('memberhost_host', []), host,
+ client.sudorule_add_host_host,
+ client.sudorule_remove_host_host) or changed
+
+ if hostgroup is not None:
+ changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
+ changed = modify_if_diff(module, name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup,
+ client.sudorule_add_host_hostgroup,
+ client.sudorule_remove_host_hostgroup) or changed
+ if sudoopt is not None:
+ changed = modify_if_diff(module, name, ipa_sudorule.get('ipasudoopt', []), sudoopt,
+ client.sudorule_add_option_ipasudoopt,
+ client.sudorule_remove_option_ipasudoopt) or changed
+ if user is not None:
+ changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
+ changed = modify_if_diff(module, name, ipa_sudorule.get('memberuser_user', []), user,
+ client.sudorule_add_user_user,
+ client.sudorule_remove_user_user) or changed
+ if usergroup is not None:
+ changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
+ changed = modify_if_diff(module, name, ipa_sudorule.get('memberuser_group', []), usergroup,
+ client.sudorule_add_user_group,
+ client.sudorule_remove_user_group) or changed
+ else:
+ if ipa_sudorule:
+ changed = True
+ if not module.check_mode:
+ client.sudorule_del(name)
+
+ return changed, client.sudorule_find(name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ cmd=dict(type='list', required=False),
+ cmdcategory=dict(type='str', required=False, choices=['all']),
+ cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str', required=False),
+ host=dict(type='list', required=False),
+ hostcategory=dict(type='str', required=False, choices=['all']),
+ hostgroup=dict(type='list', required=False),
+ sudoopt=dict(type='list', required=False),
+ state=dict(type='str', required=False, default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),
+ user=dict(type='list', required=False),
+ usercategory=dict(type='str', required=False, choices=['all']),
+ usergroup=dict(type='list', required=False),
+ ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
+ ipa_host=dict(type='str', required=False, default='ipa.example.com'),
+ ipa_port=dict(type='int', required=False, default=443),
+ ipa_user=dict(type='str', required=False, default='admin'),
+ ipa_pass=dict(type='str', required=True, no_log=True),
+ validate_certs=dict(type='bool', required=False, default=True),
+ ),
+ mutually_exclusive=[['cmdcategory', 'cmd'],
+ ['hostcategory', 'host'],
+ ['hostcategory', 'hostgroup'],
+ ['usercategory', 'user'],
+ ['usercategory', 'usergroup']],
+ supports_check_mode=True,
+ )
+
+ client = SudoRuleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudorule = ensure(module, client)
+ module.exit_json(changed=changed, sudorule=sudorule)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/identity/ipa/ipa_user.py b/identity/ipa/ipa_user.py
new file mode 100644
index 00000000000..5e020d73440
--- /dev/null
+++ b/identity/ipa/ipa_user.py
@@ -0,0 +1,346 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipa_user
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA users
+description:
+- Add, modify and delete user within IPA server
+options:
+ displayname:
+ description: Display name
+ required: false
+ givenname:
+ description: First name
+ required: false
+ loginshell:
+ description: Login shell
+ required: false
+ mail:
+ description:
+ - List of mail addresses assigned to the user.
+ - If an empty list is passed all assigned email addresses will be deleted.
+ - If None is passed email addresses will not be checked or changed.
+ required: false
+ password:
+ description:
+ - Password
+ required: false
+ sn:
+ description: Surname
+ required: false
+ sshpubkey:
+ description:
+ - List of public SSH keys.
+ - If an empty list is passed all assigned public keys will be deleted.
+ - If None is passed SSH public keys will not be checked or changed.
+ required: false
+ state:
+ description: State to ensure
+ required: false
+ default: "present"
+ choices: ["present", "absent", "enabled", "disabled"]
+ telephonenumber:
+ description:
+ - List of telephone numbers assigned to the user.
+ - If an empty list is passed all assigned telephone numbers will be deleted.
+ - If None is passed telephone numbers will not be checked or changed.
+ required: false
+ title:
+ description: Title
+ required: false
+ uid:
+ description: uid of the user
+ required: true
+ aliases: ["name"]
+ ipa_port:
+ description: Port of IPA server
+ required: false
+ default: 443
+ ipa_host:
+ description: IP or hostname of IPA server
+ required: false
+ default: "ipa.example.com"
+ ipa_user:
+ description: Administrative account used on IPA server
+ required: false
+ default: "admin"
+ ipa_pass:
+ description: Password of administrative user
+ required: true
+ ipa_prot:
+ description: Protocol used by IPA server
+ required: false
+ default: "https"
+ choices: ["http", "https"]
+ validate_certs:
+ description:
+ - This only applies if C(ipa_prot) is I(https).
+ - If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+version_added: "2.3"
+requirements:
+- base64
+- hashlib
+'''
+
+EXAMPLES = '''
+# Ensure pinky is present
+- ipa_user:
+ name: pinky
+ state: present
+ givenname: Pinky
+ sn: Acme
+ mail:
+ - pinky@acme.com
+ telephonenumber:
+ - '+555123456'
+ sshpubkeyfp:
+ - ssh-rsa ....
+ - ssh-dsa ....
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+# Ensure brain is absent
+- ipa_user:
+ name: brain
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
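+
+# Keep the account but lock it (state=disabled sets nsaccountlock)
+- ipa_user:
+ name: pinky
+ state: disabled
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret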
+'''
+
+RETURN = '''
+user:
+ description: User as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import base64
+import hashlib
+
+from ansible.module_utils.ipa import IPAClient
+
+class UserIPAClient(IPAClient):
+
+ def __init__(self, module, host, port, protocol):
+ super(UserIPAClient, self).__init__(module, host, port, protocol)
+
+ def user_find(self, name):
+ return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name})
+
+ def user_add(self, name, item):
+ return self._post_json(method='user_add', name=name, item=item)
+
+ def user_mod(self, name, item):
+ return self._post_json(method='user_mod', name=name, item=item)
+
+ def user_del(self, name):
+ return self._post_json(method='user_del', name=name)
+
+ def user_disable(self, name):
+ return self._post_json(method='user_disable', name=name)
+
+ def user_enable(self, name):
+ return self._post_json(method='user_enable', name=name)
+
+
+def get_user_dict(displayname=None, givenname=None, loginshell=None, mail=None, nsaccountlock=False, sn=None,
+ sshpubkey=None, telephonenumber=None, title=None, userpassword=None):
+ user = {}
+ if displayname is not None:
+ user['displayname'] = displayname
+ if givenname is not None:
+ user['givenname'] = givenname
+ if loginshell is not None:
+ user['loginshell'] = loginshell
+ if mail is not None:
+ user['mail'] = mail
+ user['nsaccountlock'] = nsaccountlock
+ if sn is not None:
+ user['sn'] = sn
+ if sshpubkey is not None:
+ user['ipasshpubkey'] = sshpubkey
+ if telephonenumber is not None:
+ user['telephonenumber'] = telephonenumber
+ if title is not None:
+ user['title'] = title
+ if userpassword is not None:
+ user['userpassword'] = userpassword
+
+ return user
+
+
+def get_user_diff(ipa_user, module_user):
+ """
+ Return the keys whose values differ between the two dicts. Unfortunately the
+ IPA API returns everything as a list, even if only a single value is possible,
+ so some extra complexity is needed: if an attribute of module_user is not a
+ list while the same attribute of ipa_user is, the module value is wrapped in
+ a list before comparison. This way the method should not need changes if the
+ shape of the returned API dict changes.
+ :param ipa_user:
+ :param module_user:
+ :return:
+ """
+ # return [item for item in module_user.keys() if module_user.get(item, None) != ipa_user.get(item, None)]
+ result = []
+ # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys itself but instead the fingerprints.
+ # These are used for comparison.
+ sshpubkey = None
+ if 'ipasshpubkey' in module_user:
+ module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey) for pubkey in module_user['ipasshpubkey']]
+ # Remove the ipasshpubkey element as it is not returned from IPA, but save its value to be used later on
+ sshpubkey = module_user['ipasshpubkey']
+ del module_user['ipasshpubkey']
+ for key in module_user.keys():
+ mod_value = module_user.get(key, None)
+ ipa_value = ipa_user.get(key, None)
+ if isinstance(ipa_value, list) and not isinstance(mod_value, list):
+ mod_value = [mod_value]
+ if isinstance(ipa_value, list) and isinstance(mod_value, list):
+ mod_value = sorted(mod_value)
+ ipa_value = sorted(ipa_value)
+ if mod_value != ipa_value:
+ result.append(key)
+ # If there are public keys, remove the fingerprints and add them back to the dict
+ if sshpubkey is not None:
+ del module_user['sshpubkeyfp']
+ module_user['ipasshpubkey'] = sshpubkey
+ return result
+
+
+def get_ssh_key_fingerprint(ssh_key):
+ """
+ Return the public key fingerprint of a given public SSH key
+ in format "FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7 [user@host] (ssh-rsa)"
+ :param ssh_key:
+ :return:
+ """
+ parts = ssh_key.strip().split()
+ if len(parts) < 2:
+ return None
+ key_type = parts[0]
+ key = base64.b64decode(parts[1].encode('ascii'))
+
+ fp_plain = hashlib.md5(key).hexdigest()
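+ # pair consecutive hex digits and join them with colons to produce the
+ # classic aa:bb:cc MD5 fingerprint notation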
+ key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper()
+ if len(parts) < 3:
+ return "%s (%s)" % (key_fp, key_type)
+ else:
+ user_host = parts[2]
+ return "%s %s (%s)" % (key_fp, user_host, key_type)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['name']
+ nsaccountlock = state == 'disabled'
+
+ module_user = get_user_dict(displayname=module.params.get('displayname'),
+ givenname=module.params.get('givenname'),
+ loginshell=module.params['loginshell'],
+ mail=module.params['mail'], sn=module.params['sn'],
+ sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock,
+ telephonenumber=module.params['telephonenumber'], title=module.params['title'],
+ userpassword=module.params['password'])
+
+ ipa_user = client.user_find(name=name)
+
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_user:
+ changed = True
+ if not module.check_mode:
+ ipa_user = client.user_add(name=name, item=module_user)
+ else:
+ diff = get_user_diff(ipa_user, module_user)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ ipa_user = client.user_mod(name=name, item=module_user)
+ else:
+ if ipa_user:
+ changed = True
+ if not module.check_mode:
+ client.user_del(name)
+
+ return changed, ipa_user
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ displayname=dict(type='str', required=False),
+ givenname=dict(type='str', required=False),
+ loginshell=dict(type='str', required=False),
+ mail=dict(type='list', required=False),
+ sn=dict(type='str', required=False),
+ uid=dict(type='str', required=True, aliases=['name']),
+ password=dict(type='str', required=False, no_log=True),
+ sshpubkey=dict(type='list', required=False),
+ state=dict(type='str', required=False, default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),
+ telephonenumber=dict(type='list', required=False),
+ title=dict(type='str', required=False),
+ ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
+ ipa_host=dict(type='str', required=False, default='ipa.example.com'),
+ ipa_port=dict(type='int', required=False, default=443),
+ ipa_user=dict(type='str', required=False, default='admin'),
+ ipa_pass=dict(type='str', required=True, no_log=True),
+ validate_certs=dict(type='bool', required=False, default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ client = UserIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ # If sshpubkey is defined as None then module.params['sshpubkey'] is [None], while IPA itself returns
+ # None (not a list). Therefore a small check here replaces the list by None; otherwise get_user_diff()
+ # would report sshpubkey as different, which should be avoided.
+ if module.params['sshpubkey'] is not None:
+ if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "":
+ module.params['sshpubkey'] = None
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, user = ensure(module, client)
+ module.exit_json(changed=changed, user=user)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/identity/opendj/__init__.py b/identity/opendj/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/identity/opendj/opendj_backendprop.py b/identity/opendj/opendj_backendprop.py
new file mode 100644
index 00000000000..893bbfdd47d
--- /dev/null
+++ b/identity/opendj/opendj_backendprop.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: opendj_backendprop
+short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command.
+description:
+ - This module will update settings for OpenDJ with the command set-backend-prop.
+ - It will first check via the get-backend-prop command whether the configuration needs to be applied.
+version_added: "2.2"
+author:
+ - Werner Dijkerman
+options:
+ opendj_bindir:
+ description:
+ - The path to the bin directory of OpenDJ.
+ required: false
+ default: /opt/opendj/bin
+ hostname:
+ description:
+ - The hostname of the OpenDJ server.
+ required: true
+ port:
+ description:
+ - The Admin port on which the OpenDJ instance is available.
+ required: true
+ username:
+ description:
+ - The username to connect to.
+ required: false
+ default: cn=Directory Manager
+ password:
+ description:
+ - The password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ passwordfile:
+ description:
+ - Location to the password file which holds the password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ backend:
+ description:
+ - The name of the backend on which the property needs to be updated.
+ required: true
+ name:
+ description:
+ - The configuration setting to update.
+ required: true
+ value:
+ description:
+ - The value for the configuration item.
+ required: true
+ state:
+ description:
+ - Whether the configuration value should be added/updated.
+ required: false
+ default: "present"
+'''
+
+EXAMPLES = '''
+ - name: "Add or update OpenDJ backend properties"
+ action: opendj_backendprop
+ hostname=localhost
+ port=4444
+ username="cn=Directory Manager"
+ password=password
+ backend=userRoot
+ name=index-entry-limit
+ value=5000
+'''
+
+RETURN = '''
+'''
+
+import subprocess
+
+
+class BackendProp(object):
+ def __init__(self, module):
+ self._module = module
+
+ def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name):
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'get-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '-n', '-X', '-s'
+ ] + password_method
+ process = subprocess.Popen(my_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = process.communicate()
+ if process.returncode == 0:
+ return stdout
+ else:
+ self._module.fail_json(msg="Error message: " + str(stderr))
+
+ def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value):
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'set-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '--set', name + ":" + value,
+ '-n', '-X'
+ ] + password_method
+ process = subprocess.Popen(my_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = process.communicate()
+ if process.returncode == 0:
+ return True
+ else:
+ self._module.fail_json(msg="Error message: " + stderr)
+
+ def validate_data(self, data=None, name=None, value=None):
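+ """Parse the script-friendly (-s) dsconfig output, one 'name value' pair
+ per line, and return True when the requested property already carries
+ the requested value."""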
+ for config_line in data.split('\n'):
+ if config_line:
+ split_line = config_line.split()
+ if split_line[0] == name:
+ if split_line[1] == value:
+ return True
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ opendj_bindir=dict(default="/opt/opendj/bin", type="path"),
+ hostname=dict(required=True),
+ port=dict(required=True),
+ username=dict(default="cn=Directory Manager", required=False),
+ password=dict(required=False, no_log=True),
+ passwordfile=dict(required=False, type="path"),
+ backend=dict(required=True),
+ name=dict(required=True),
+ value=dict(required=True),
+ state=dict(default="present"),
+ ),
+ supports_check_mode=True
+ )
+
+ opendj_bindir = module.params['opendj_bindir']
+ hostname = module.params['hostname']
+ port = module.params['port']
+ username = module.params['username']
+ password = module.params['password']
+ passwordfile = module.params['passwordfile']
+ backend_name = module.params['backend']
+ name = module.params['name']
+ value = module.params['value']
+ state = module.params['state']
+
+ if module.params["password"] is not None:
+ password_method = ['-w', password]
+ elif module.params["passwordfile"] is not None:
+ password_method = ['-j', passwordfile]
+ else:
+ module.fail_json(msg="No credentials are given. Use either 'password' or 'passwordfile'")
+
+ if module.params["passwordfile"] and module.params["password"]:
+ module.fail_json(msg="only one of 'password' or 'passwordfile' can be set")
+
+ opendj = BackendProp(module)
+ validate = opendj.get_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name)
+
+ if validate:
+ if not opendj.validate_data(data=validate, name=name, value=value):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if opendj.set_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name,
+ name=name,
+ value=value):
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/infrastructure/__init__.py b/infrastructure/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/infrastructure/foreman/__init__.py b/infrastructure/foreman/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/infrastructure/foreman/foreman.py b/infrastructure/foreman/foreman.py
new file mode 100644
index 00000000000..d7dcb5f2959
--- /dev/null
+++ b/infrastructure/foreman/foreman.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2016, Eric D Helms
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: foreman
+short_description: Manage Foreman Resources
+description:
+ - Allows the management of Foreman resources inside your Foreman server
+version_added: "2.3"
+author: "Eric D Helms (@ehelms)"
+requirements:
+ - "nailgun >= 0.28.0"
+ - "python >= 2.6"
+ - datetime
+options:
+ server_url:
+ description:
+ - URL of Foreman server
+ required: true
+ username:
+ description:
+ - Username on Foreman server
+ required: true
+ password:
+ description:
+ - Password for user accessing Foreman server
+ required: true
+ entity:
+ description:
+ - The Foreman resource that the action will be performed on (e.g. organization, host)
+ required: true
+ params:
+ description:
+ - Parameters associated with the entity resource to set or edit, in dictionary format (e.g. name, description)
+ required: true
+'''
+
+EXAMPLES = '''
+- name: "Create CI Organization"
+ local_action:
+ module: foreman
+ username: "admin"
+ password: "admin"
+ server_url: "https://fakeserver.com"
+ entity: "organization"
+ params:
+ name: "My Cool New Organization"
+'''
+
+RETURN = '''# '''
+
+import datetime
+
+try:
+ from nailgun import entities, entity_fields
+ from nailgun.config import ServerConfig
+ HAS_NAILGUN_PACKAGE = True
+except ImportError:
+ HAS_NAILGUN_PACKAGE = False
+
+class NailGun(object):
+ def __init__(self, server, entities, module):
+ self._server = server
+ self._entities = entities
+ self._module = module
+
+ def find_organization(self, name, **params):
+ org = self._entities.Organization(self._server, name=name, **params)
+ response = org.search(set(), {'search': 'name={}'.format(name)})
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Content View found for %s" % name)
+
+ def organization(self, params):
+ name = params['name']
+ del params['name']
+ org = self.find_organization(name, **params)
+
+ if org:
+ org = self._entities.Organization(self._server, name=name, id=org.id, **params)
+ org.update()
+ else:
+ org = self._entities.Organization(self._server, name=name, **params)
+ org.create()
+
+ return True
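+
+ # Note: organization() is an upsert: search Foreman by name, update the
+ # existing record when found, otherwise create a new one (nailgun's update()
+ # and create() issue PUT and POST requests against the Foreman API).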
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(required=True),
+ username=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ entity=dict(required=True, no_log=False),
+ verify_ssl=dict(required=False, type='bool', default=False),
+ params=dict(required=True, no_log=True, type='dict'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_NAILGUN_PACKAGE:
+ module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+ entity = module.params['entity']
+ params = module.params['params']
+ verify_ssl = module.params['verify_ssl']
+
+ server = ServerConfig(
+ url=server_url,
+ auth=(username, password),
+ verify=verify_ssl
+ )
+ ng = NailGun(server, entities, module)
+
+ # Let's make a connection to the server with the given username and password
+ try:
+ org = entities.Organization(server)
+ org.search()
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to Foreman server: %s " % e)
+
+ if entity == 'organization':
+ ng.organization(params)
+ module.exit_json(changed=True, result="%s updated" % entity)
+ else:
+ module.fail_json(msg="Unsupported entity supplied")
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/infrastructure/foreman/katello.py b/infrastructure/foreman/katello.py
new file mode 100644
index 00000000000..86b7be0622c
--- /dev/null
+++ b/infrastructure/foreman/katello.py
@@ -0,0 +1,533 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2016, Eric D Helms
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: katello
+short_description: Manage Katello Resources
+description:
+ - Allows the management of Katello resources inside your Foreman server
+version_added: "2.3"
+author: "Eric D Helms (@ehelms)"
+requirements:
+ - "nailgun >= 0.28.0"
+ - "python >= 2.6"
+ - datetime
+options:
+ server_url:
+ description:
+ - URL of Foreman server
+ required: true
+ username:
+ description:
+ - Username on Foreman server
+ required: true
+ password:
+ description:
+ - Password for user accessing Foreman server
+ required: true
+ entity:
+ description:
+ - The Foreman resource that the action will be performed on (e.g. organization, host)
+ required: true
+ params:
+ description:
+ - Parameters associated with the entity resource to set or edit, in dictionary format (e.g. name, description)
+ required: true
+'''
+
+EXAMPLES = '''
+Simple Example:
+
+- name: "Create Product"
+ local_action:
+ module: katello
+ username: "admin"
+ password: "admin"
+ server_url: "https://fakeserver.com"
+ entity: "product"
+ params:
+ name: "Centos 7"
+
+Abstraction Example:
+
+katello.yml
+---
+- name: "{{ name }}"
+ local_action:
+ module: katello
+ username: "admin"
+ password: "admin"
+ server_url: "https://fakeserver.com"
+ entity: "{{ entity }}"
+ params: "{{ params }}"
+
+tasks.yml
+---
+- include: katello.yml
+ vars:
+ name: "Create Dev Environment"
+ entity: "lifecycle_environment"
+ params:
+ name: "Dev"
+ prior: "Library"
+ organization: "Default Organization"
+
+- include: katello.yml
+ vars:
+ name: "Create Centos Product"
+ entity: "product"
+ params:
+ name: "Centos 7"
+ organization: "Default Organization"
+
+- include: katello.yml
+ vars:
+ name: "Create 7.2 Repository"
+ entity: "repository"
+ params:
+ name: "Centos 7.2"
+ product: "Centos 7"
+ organization: "Default Organization"
+ content_type: "yum"
+ url: "http://mirror.centos.org/centos/7/os/x86_64/"
+
+- include: katello.yml
+ vars:
+ name: "Create Centos 7 View"
+ entity: "content_view"
+ params:
+ name: "Centos 7 View"
+ organization: "Default Organization"
+ repositories:
+ - name: "Centos 7.2"
+ product: "Centos 7"
+
+- include: katello.yml
+ vars:
+ name: "Enable RHEL Product"
+ entity: "repository_set"
+ params:
+ name: "Red Hat Enterprise Linux 7 Server (RPMs)"
+ product: "Red Hat Enterprise Linux Server"
+ organization: "Default Organization"
+ basearch: "x86_64"
+ releasever: "7"
+'''
+
+RETURN = '''# '''
+
+import datetime
+import os
+
+try:
+ from nailgun import entities, entity_fields, entity_mixins
+ from nailgun.config import ServerConfig
+ HAS_NAILGUN_PACKAGE = True
+except ImportError:
+ HAS_NAILGUN_PACKAGE = False
+
+
+class NailGun(object):
+ def __init__(self, server, entities, module):
+ self._server = server
+ self._entities = entities
+ self._module = module
+ entity_mixins.TASK_TIMEOUT = 1000
+
+ def find_organization(self, name, **params):
+ org = self._entities.Organization(self._server, name=name, **params)
+ response = org.search(set(), {'search': 'name={}'.format(name)})
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No organization found for %s" % name)
+
+ def find_lifecycle_environment(self, name, organization):
+ org = self.find_organization(organization)
+
+ lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
+ response = lifecycle_env.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Lifecycle Found found for %s" % name)
+
+ def find_product(self, name, organization):
+ org = self.find_organization(organization)
+
+ product = self._entities.Product(self._server, name=name, organization=org)
+ response = product.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Product found for %s" % name)
+
+ def find_repository(self, name, product, organization):
+ product = self.find_product(product, organization)
+
+ repository = self._entities.Repository(self._server, name=name, product=product)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ response = repository.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Repository found for %s" % name)
+
+ def find_content_view(self, name, organization):
+ org = self.find_organization(organization)
+
+ content_view = self._entities.ContentView(self._server, name=name, organization=org)
+ response = content_view.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Content View found for %s" % name)
+
+ def organization(self, params):
+ name = params['name']
+ del params['name']
+ org = self.find_organization(name, **params)
+
+ if org:
+ org = self._entities.Organization(self._server, name=name, id=org.id, **params)
+ org.update()
+ else:
+ org = self._entities.Organization(self._server, name=name, **params)
+ org.create()
+
+ return True
+
+ def manifest(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+
+ manifest_file = open(os.getcwd() + params['content'], 'r')
+ try:
+ content = manifest_file.read()
+ finally:
+ manifest_file.close()
+
+ manifest = self._entities.Subscription(self._server)
+
+ try:
+ manifest.upload(
+ data={'organization_id': org.id},
+ files={'content': content}
+ )
+ return True
+ except Exception:
+ e = get_exception()
+
+ if "Import is the same as existing data" in e.message:
+ return True
+ else:
+ self._module.fail_json(msg="Manifest import failed with %s" % e)
+
+ def product(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+
+ product = self._entities.Product(self._server, **params)
+ response = product.search()
+
+ if len(response) == 1:
+ product.id = response[0].id
+ product.update()
+ else:
+ product.create()
+
+ return True
+
+ def sync_product(self, params):
+ org = self.find_organization(params['organization'])
+ product = self.find_product(params['name'], org.name)
+
+ return product.sync()
+
+ def repository(self, params):
+ product = self.find_product(params['product'], params['organization'])
+ params['product'] = product.id
+ del params['organization']
+
+ repository = self._entities.Repository(self._server, **params)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ response = repository.search()
+
+ if len(response) == 1:
+ repository.id = response[0].id
+ repository.update()
+ else:
+ repository.create()
+
+ return True
+
+ def sync_repository(self, params):
+ org = self.find_organization(params['organization'])
+ repository = self.find_repository(params['name'], params['product'], org.name)
+
+ return repository.sync()
+
+ def repository_set(self, params):
+ product = self.find_product(params['product'], params['organization'])
+ del params['product']
+ del params['organization']
+
+ if not product:
+ return False
+ else:
+ reposet = self._entities.RepositorySet(self._server, product=product, name=params['name'])
+ reposet = reposet.search()[0]
+
+ formatted_name = [params['name'].replace('(', '').replace(')', '')]
+ formatted_name.append(params['basearch'])
+
+ if params['releasever']:
+ formatted_name.append(params['releasever'])
+
+ formatted_name = ' '.join(formatted_name)
+
+ repository = self._entities.Repository(self._server, product=product, name=formatted_name)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ repository = repository.search()
+
+ if len(repository) == 0:
+ reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']})
+
+ return True
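+
+ # Worked example of the name formatting above:
+ #   name "Red Hat Enterprise Linux 7 Server (RPMs)", basearch "x86_64",
+ #   releasever "7" -> repository "Red Hat Enterprise Linux 7 Server RPMs x86_64 7"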
+
+ def sync_plan(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+ params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M")
+
+ products = params['products']
+ del params['products']
+
+ sync_plan = self._entities.SyncPlan(
+ self._server,
+ name=params['name'],
+ organization=org
+ )
+ response = sync_plan.search()
+
+ sync_plan.sync_date = params['sync_date']
+ sync_plan.interval = params['interval']
+
+ if len(response) == 1:
+ sync_plan.id = response[0].id
+ sync_plan.update()
+ else:
+ response = sync_plan.create()
+ sync_plan.id = response.id
+
+ if products:
+ ids = []
+
+ for name in products:
+ product = self.find_product(name, org.name)
+ ids.append(product.id)
+
+ sync_plan.add_products(data={'product_ids': ids})
+
+ return True
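+
+ # sync_date is parsed with strptime(..., "%H:%M"), so e.g. "02:30" becomes
+ # datetime.datetime(1900, 1, 1, 2, 30); only the time of day is significant.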
+
+ def content_view(self, params):
+ org = self.find_organization(params['organization'])
+
+ content_view = self._entities.ContentView(self._server, name=params['name'], organization=org)
+ response = content_view.search()
+
+ if len(response) == 1:
+ content_view.id = response[0].id
+ content_view.update()
+ else:
+ content_view = content_view.create()
+
+ if params['repositories']:
+ repos = []
+
+ for repository in params['repositories']:
+ repository = self.find_repository(repository['name'], repository['product'], org.name)
+ repos.append(repository)
+
+ content_view.repository = repos
+ content_view.update(['repository'])
+
+ return True
+
+ def find_content_view_version(self, name, organization, environment):
+ env = self.find_lifecycle_environment(environment, organization)
+ content_view = self.find_content_view(name, organization)
+
+ content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view)
+ response = content_view_version.search(['content_view'], {'environment_id': env.id})
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Content View version found for %s" % response)
+
+ def publish(self, params):
+ content_view = self.find_content_view(params['name'], params['organization'])
+
+ return content_view.publish()
+
+ def promote(self, params):
+ to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization'])
+ version = self.find_content_view_version(params['name'], params['organization'], params['from_environment'])
+
+ data = {'environment_id': to_environment.id}
+ return version.promote(data=data)
+
+ def lifecycle_environment(self, params):
+ org = self.find_organization(params['organization'])
+ prior_env = self.find_lifecycle_environment(params['prior'], params['organization'])
+
+ lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env)
+ response = lifecycle_env.search()
+
+ if len(response) == 1:
+ lifecycle_env.id = response[0].id
+ lifecycle_env.update()
+ else:
+ lifecycle_env.create()
+
+ return True
+
+ def activation_key(self, params):
+ org = self.find_organization(params['organization'])
+
+ activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
+ response = activation_key.search()
+
+ if len(response) == 1:
+ activation_key.id = response[0].id
+ activation_key.update()
+ else:
+ activation_key.create()
+
+ if params['content_view']:
+ content_view = self.find_content_view(params['content_view'], params['organization'])
+ lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])
+
+ activation_key.content_view = content_view
+ activation_key.environment = lifecycle_environment
+ activation_key.update()
+
+ return True
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(required=True),
+ username=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ entity=dict(required=True, no_log=False),
+ action=dict(required=False, no_log=False),
+ verify_ssl=dict(required=False, type='bool', default=False),
+ params=dict(required=True, no_log=True, type='dict'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_NAILGUN_PACKAGE:
+ module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+ entity = module.params['entity']
+ action = module.params['action']
+ params = module.params['params']
+ verify_ssl = module.params['verify_ssl']
+
+ server = ServerConfig(
+ url=server_url,
+ auth=(username, password),
+ verify=verify_ssl
+ )
+ ng = NailGun(server, entities, module)
+
+ # Let's make a connection to the server with the given username and password
+ try:
+ org = entities.Organization(server)
+ org.search()
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to Foreman server: %s " % e)
+
+ result = False
+
+ if entity == 'product':
+ if action == 'sync':
+ result = ng.sync_product(params)
+ else:
+ result = ng.product(params)
+ elif entity == 'repository':
+ if action == 'sync':
+ result = ng.sync_repository(params)
+ else:
+ result = ng.repository(params)
+ elif entity == 'manifest':
+ result = ng.manifest(params)
+ elif entity == 'repository_set':
+ result = ng.repository_set(params)
+ elif entity == 'sync_plan':
+ result = ng.sync_plan(params)
+ elif entity == 'content_view':
+ if action == 'publish':
+ result = ng.publish(params)
+ elif action == 'promote':
+ result = ng.promote(params)
+ else:
+ result = ng.content_view(params)
+ elif entity == 'lifecycle_environment':
+ result = ng.lifecycle_environment(params)
+ elif entity == 'activation_key':
+ result = ng.activation_key(params)
+ else:
+ module.fail_json(msg="Unsupported entity supplied")
+
+ module.exit_json(changed=result, result="%s updated" % entity)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py
index fc69f490fad..428bec096f3 100644
--- a/messaging/rabbitmq_binding.py
+++ b/messaging/rabbitmq_binding.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rabbitmq_binding
@@ -28,7 +32,7 @@
short_description: This module manages rabbitMQ bindings
description:
- This module uses rabbitMQ Rest API to create/delete bindings
-requirements: [ python requests ]
+requirements: [ "requests >= 1.0.0" ]
options:
state:
description:
@@ -94,10 +98,18 @@
EXAMPLES = '''
# Bind myQueue to directExchange with routing key info
-- rabbitmq_binding: name=directExchange destination=myQueue type=queue routing_key=info
+- rabbitmq_binding:
+ name: directExchange
+ destination: myQueue
+ type: queue
+ routing_key: info
# Bind directExchange to topicExchange with routing key *.info
-- rabbitmq_binding: name=topicExchange destination=topicExchange type=exchange routing_key="*.info"
+- rabbitmq_binding:
+ name: topicExchange
+ destination: topicExchange
+ type: exchange
+ routing_key: "*.info"
'''
import requests
@@ -127,14 +139,19 @@ def main():
else:
dest_type="e"
+ if module.params['routing_key'] == "":
+ props = "~"
+ else:
+ props = urllib.quote(module.params['routing_key'],'')
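+
+ # The RabbitMQ management API encodes an empty routing key as "~" in the
+ # "props" segment of the binding URL; non-empty keys are URL-quoted instead.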
+
url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % (
module.params['login_host'],
module.params['login_port'],
urllib.quote(module.params['vhost'],''),
- module.params['name'],
+ urllib.quote(module.params['name'],''),
dest_type,
- module.params['destination'],
- urllib.quote(module.params['routing_key'],'')
+ urllib.quote(module.params['destination'],''),
+ props
)
# Check if exchange already exists
@@ -173,9 +190,9 @@ def main():
module.params['login_host'],
module.params['login_port'],
urllib.quote(module.params['vhost'],''),
- module.params['name'],
+ urllib.quote(module.params['name'],''),
dest_type,
- module.params['destination']
+ urllib.quote(module.params['destination'],'')
)
r = requests.post(
@@ -211,4 +228,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/messaging/rabbitmq_exchange.py b/messaging/rabbitmq_exchange.py
index fb74298879b..a5e1e353dac 100644
--- a/messaging/rabbitmq_exchange.py
+++ b/messaging/rabbitmq_exchange.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rabbitmq_exchange
@@ -28,7 +32,7 @@
short_description: This module manages rabbitMQ exchanges
description:
- This module uses rabbitMQ Rest API to create/delete exchanges
-requirements: [ python requests ]
+requirements: [ "requests >= 1.0.0" ]
options:
name:
description:
@@ -100,10 +104,14 @@
EXAMPLES = '''
# Create direct exchange
-- rabbitmq_exchange: name=directExchange
+- rabbitmq_exchange:
+ name: directExchange
# Create topic exchange on vhost
-- rabbitmq_exchange: name=topicExchange type=topic vhost=myVhost
+- rabbitmq_exchange:
+ name: topicExchange
+ type: topic
+ vhost: myVhost
'''
import requests
@@ -120,9 +128,9 @@ def main():
login_host = dict(default='localhost', type='str'),
login_port = dict(default='15672', type='str'),
vhost = dict(default='/', type='str'),
- durable = dict(default=True, choices=BOOLEANS, type='bool'),
- auto_delete = dict(default=False, choices=BOOLEANS, type='bool'),
- internal = dict(default=False, choices=BOOLEANS, type='bool'),
+ durable = dict(default=True, type='bool'),
+ auto_delete = dict(default=False, type='bool'),
+ internal = dict(default=False, type='bool'),
exchange_type = dict(default='direct', aliases=['type'], type='str'),
arguments = dict(default=dict(), type='dict')
),
@@ -133,9 +141,9 @@ def main():
module.params['login_host'],
module.params['login_port'],
urllib.quote(module.params['vhost'],''),
- module.params['name']
+ urllib.quote(module.params['name'],'')
)
-
+
# Check if exchange already exists
r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
@@ -215,4 +223,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/messaging/rabbitmq_parameter.py b/messaging/rabbitmq_parameter.py
index 6be18bdce3d..32959f2e562 100644
--- a/messaging/rabbitmq_parameter.py
+++ b/messaging/rabbitmq_parameter.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rabbitmq_parameter
@@ -63,10 +67,11 @@
EXAMPLES = """
# Set the federation parameter 'local_username' to a value of 'guest' (in quotes)
-- rabbitmq_parameter: component=federation
- name=local-username
- value='"guest"'
- state=present
+- rabbitmq_parameter:
+ component: federation
+ name: local-username
+ value: '"guest"'
+ state: present
"""
class RabbitMqParameter(object):
@@ -96,12 +101,17 @@ def get(self):
component, name, value = param_item.split('\t')
if component == self.component and name == self.name:
- self._value = value
+ self._value = json.loads(value)
return True
return False
def set(self):
- self._exec(['set_parameter', '-p', self.vhost, self.component, self.name, self.value])
+ self._exec(['set_parameter',
+ '-p',
+ self.vhost,
+ self.component,
+ self.name,
+ json.dumps(self.value)])
def delete(self):
self._exec(['clear_parameter', '-p', self.vhost, self.component, self.name])
@@ -126,6 +136,8 @@ def main():
component = module.params['component']
name = module.params['name']
value = module.params['value']
+ if isinstance(value, str):
+ value = json.loads(value)
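+ # e.g. the documented value '"guest"' (a JSON-encoded string) becomes the
+ # Python string 'guest', matching what get() yields after decoding the
+ # `rabbitmqctl list_parameters` output with json.loads().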
vhost = module.params['vhost']
state = module.params['state']
node = module.params['node']
@@ -149,4 +161,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/messaging/rabbitmq_plugin.py b/messaging/rabbitmq_plugin.py
index b52de337e2e..cc16966dcf4 100644
--- a/messaging/rabbitmq_plugin.py
+++ b/messaging/rabbitmq_plugin.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rabbitmq_plugin
@@ -56,7 +60,9 @@
EXAMPLES = '''
# Enables the rabbitmq_management plugin
-- rabbitmq_plugin: names=rabbitmq_management state=enabled
+- rabbitmq_plugin:
+ names: rabbitmq_management
+ state: enabled
'''
import os
@@ -147,4 +153,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/messaging/rabbitmq_policy.py b/messaging/rabbitmq_policy.py
index 81d7068ec46..6d5a053f3d6 100644
--- a/messaging/rabbitmq_policy.py
+++ b/messaging/rabbitmq_policy.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rabbitmq_policy
@@ -38,6 +42,13 @@
- The name of the vhost to apply to.
required: false
default: /
+ apply_to:
+ description:
+ - What the policy applies to. Requires RabbitMQ 3.2.0 or later.
+ required: false
+ default: all
+ choices: [all, exchanges, queues]
+ version_added: "2.1"
pattern:
description:
- A regex of queues to apply the policy to.
@@ -67,13 +78,19 @@
EXAMPLES = '''
- name: ensure the default vhost contains the HA policy via a dict
- rabbitmq_policy: name=HA pattern='.*'
+ rabbitmq_policy:
+ name: HA
+ pattern: .*
args:
tags:
- "ha-mode": all
+ ha-mode: all
- name: ensure the default vhost contains the HA policy
- rabbitmq_policy: name=HA pattern='.*' tags="ha-mode=all"
+ rabbitmq_policy:
+ name: HA
+ pattern: .*
+ tags:
+ ha-mode: all
'''
class RabbitMqPolicy(object):
def __init__(self, module, name):
@@ -81,6 +98,7 @@ def __init__(self, module, name):
self._name = name
self._vhost = module.params['vhost']
self._pattern = module.params['pattern']
+ self._apply_to = module.params['apply_to']
self._tags = module.params['tags']
self._priority = module.params['priority']
self._node = module.params['node']
@@ -112,6 +130,9 @@ def set(self):
args.append(json.dumps(self._tags))
args.append('--priority')
args.append(self._priority)
+ if (self._apply_to != 'all'):
+ args.append('--apply-to')
+ args.append(self._apply_to)
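+ # '--apply-to' is only passed for non-default values, so the module keeps
+ # working against RabbitMQ releases older than 3.2.0 that lack the flag.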
return self._exec(args)
def clear(self):
@@ -123,6 +144,7 @@ def main():
name=dict(required=True),
vhost=dict(default='/'),
pattern=dict(required=True),
+ apply_to=dict(default='all', choices=['all', 'exchanges', 'queues']),
tags=dict(type='dict', required=True),
priority=dict(default='0'),
node=dict(default='rabbit'),
@@ -153,4 +175,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/messaging/rabbitmq_queue.py b/messaging/rabbitmq_queue.py
index 5a403a6b602..6b49fea9f06 100644
--- a/messaging/rabbitmq_queue.py
+++ b/messaging/rabbitmq_queue.py
@@ -19,16 +19,20 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rabbitmq_queue
-author: "Manuel Sousa (@manuel-sousa)"
+author: "Manuel Sousa (@manuel-sousa)"
version_added: "2.0"
short_description: This module manages rabbitMQ queues
description:
- This module uses rabbitMQ Rest API to create/delete queues
-requirements: [ python requests ]
+requirements: [ "requests >= 1.0.0" ]
options:
name:
description:
@@ -114,10 +118,15 @@
EXAMPLES = '''
# Create a queue
-- rabbitmq_queue: name=myQueue
+- rabbitmq_queue:
+ name: myQueue
# Create a queue on remote host
-- rabbitmq_queue: name=myRemoteQueue login_user=user login_password=secret login_host=remote.example.org
+- rabbitmq_queue:
+ name: myRemoteQueue
+ login_user: user
+ login_password: secret
+ login_host: remote.example.org
'''
import requests
@@ -134,8 +143,8 @@ def main():
login_host = dict(default='localhost', type='str'),
login_port = dict(default='15672', type='str'),
vhost = dict(default='/', type='str'),
- durable = dict(default=True, choices=BOOLEANS, type='bool'),
- auto_delete = dict(default=False, choices=BOOLEANS, type='bool'),
+ durable = dict(default=True, type='bool'),
+ auto_delete = dict(default=False, type='bool'),
message_ttl = dict(default=None, type='int'),
auto_expires = dict(default=None, type='int'),
max_length = dict(default=None, type='int'),
@@ -152,7 +161,7 @@ def main():
urllib.quote(module.params['vhost'],''),
module.params['name']
)
-
+
# Check if queue already exists
r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
@@ -260,4 +269,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/messaging/rabbitmq_user.py b/messaging/rabbitmq_user.py
index b12178e08ea..02afe298cb2 100644
--- a/messaging/rabbitmq_user.py
+++ b/messaging/rabbitmq_user.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rabbitmq_user
@@ -45,9 +49,19 @@
- User tags specified as comma delimited
required: false
default: null
+ permissions:
+ description:
+ - A list of dicts, each containing vhost, configure_priv, write_priv, and read_priv,
+ and representing a permission rule for that vhost.
+ - This option is preferable when you care about the user's permissions across all vhosts.
+ - Use the vhost, configure_priv, write_priv, and read_priv options instead
+ if you only care about permissions for specific vhosts.
+ required: false
+ default: []
vhost:
description:
- vhost to apply access privileges.
+ - This option will be ignored when the permissions option is used.
required: false
default: /
node:
@@ -61,6 +75,7 @@
- Regular expression to restrict configure actions on a resource
for the specified vhost.
- By default all actions are restricted.
+ - This option will be ignored when the permissions option is used.
required: false
default: ^$
write_priv:
@@ -68,6 +83,7 @@
- Regular expression to restrict configure actions on a resource
for the specified vhost.
- By default all actions are restricted.
+ - This option will be ignored when the permissions option is used.
required: false
default: ^$
read_priv:
@@ -75,6 +91,7 @@
- Regular expression to restrict configure actions on a resource
for the specified vhost.
- By default all actions are restricted.
+ - This option will be ignored when the permissions option is used.
required: false
default: ^$
force:
@@ -92,18 +109,33 @@
'''
EXAMPLES = '''
-# Add user to server and assign full access control
-- rabbitmq_user: user=joe
- password=changeme
- vhost=/
- configure_priv=.*
- read_priv=.*
- write_priv=.*
- state=present
+# Add user to server and assign full access control on / vhost.
+# Any permission rules the user may have on other vhosts are left untouched.
+- rabbitmq_user:
+ user: joe
+ password: changeme
+ vhost: /
+ configure_priv: .*
+ read_priv: .*
+ write_priv: .*
+ state: present
+
+# Add user to server and assign full access control on / vhost.
+# Permission rules on all other vhosts will be removed.
+- rabbitmq_user:
+ user: joe
+ password: changeme
+ permissions:
+ - vhost: /
+ configure_priv: .*
+ read_priv: .*
+ write_priv: .*
+ state: present
'''
class RabbitMqUser(object):
- def __init__(self, module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node):
+ def __init__(self, module, username, password, tags, permissions,
+ node, bulk_permissions=False):
self.module = module
self.username = username
self.password = password
@@ -113,21 +145,18 @@ def __init__(self, module, username, password, tags, vhost, configure_priv, writ
else:
self.tags = tags.split(',')
- permissions = dict(
- vhost=vhost,
- configure_priv=configure_priv,
- write_priv=write_priv,
- read_priv=read_priv
- )
self.permissions = permissions
+ self.bulk_permissions = bulk_permissions
self._tags = None
- self._permissions = None
+ self._permissions = []
self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
def _exec(self, args, run_in_check_mode=False):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
- cmd = [self._rabbitmqctl, '-q', '-n', self.node]
+ cmd = [self._rabbitmqctl, '-q']
+ if self.node is not None:
+ cmd.extend(['-n', self.node])
rc, out, err = self.module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
@@ -136,6 +165,9 @@ def get(self):
users = self._exec(['list_users'], True)
for user_tag in users:
+ if '\t' not in user_tag:
+ continue
+
user, tags = user_tag.split('\t')
if user == self.username:
@@ -154,12 +186,18 @@ def get(self):
def _get_permissions(self):
perms_out = self._exec(['list_user_permissions', self.username], True)
+ perms_list = list()
for perm in perms_out:
vhost, configure_priv, write_priv, read_priv = perm.split('\t')
- if vhost == self.permissions['vhost']:
- return dict(vhost=vhost, configure_priv=configure_priv, write_priv=write_priv, read_priv=read_priv)
-
- return dict()
+ if not self.bulk_permissions:
+ if vhost == self.permissions[0]['vhost']:
+ perms_list.append(dict(vhost=vhost, configure_priv=configure_priv,
+ write_priv=write_priv, read_priv=read_priv))
+ break
+ else:
+ perms_list.append(dict(vhost=vhost, configure_priv=configure_priv,
+ write_priv=write_priv, read_priv=read_priv))
+ return perms_list
def add(self):
if self.password is not None:
@@ -175,14 +213,21 @@ def set_tags(self):
self._exec(['set_user_tags', self.username] + self.tags)
def set_permissions(self):
- cmd = ['set_permissions']
- cmd.append('-p')
- cmd.append(self.permissions['vhost'])
- cmd.append(self.username)
- cmd.append(self.permissions['configure_priv'])
- cmd.append(self.permissions['write_priv'])
- cmd.append(self.permissions['read_priv'])
- self._exec(cmd)
+ for permission in self._permissions:
+ if permission not in self.permissions:
+ cmd = ['clear_permissions', '-p']
+ cmd.append(permission['vhost'])
+ cmd.append(self.username)
+ self._exec(cmd)
+ for permission in self.permissions:
+ if permission not in self._permissions:
+ cmd = ['set_permissions', '-p']
+ cmd.append(permission['vhost'])
+ cmd.append(self.username)
+ cmd.append(permission['configure_priv'])
+ cmd.append(permission['write_priv'])
+ cmd.append(permission['read_priv'])
+ self._exec(cmd)
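+
+ # Reconciliation example: if the user currently has permissions on vhosts
+ # "/" and "/test" but only "/" is requested, "/test" is cleared outright,
+ # while "/" is cleared and re-set only if its regexes differ from those requested.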
def has_tags_modifications(self):
return set(self.tags) != set(self._tags)
@@ -195,13 +240,14 @@ def main():
user=dict(required=True, aliases=['username', 'name']),
password=dict(default=None),
tags=dict(default=None),
+ permissions=dict(default=list(), type='list'),
vhost=dict(default='/'),
configure_priv=dict(default='^$'),
write_priv=dict(default='^$'),
read_priv=dict(default='^$'),
force=dict(default='no', type='bool'),
state=dict(default='present', choices=['present', 'absent']),
- node=dict(default='rabbit')
+ node=dict(default=None)
)
module = AnsibleModule(
argument_spec=arg_spec,
@@ -211,6 +257,7 @@ def main():
username = module.params['user']
password = module.params['password']
tags = module.params['tags']
+ permissions = module.params['permissions']
vhost = module.params['vhost']
configure_priv = module.params['configure_priv']
write_priv = module.params['write_priv']
@@ -219,7 +266,19 @@ def main():
state = module.params['state']
node = module.params['node']
- rabbitmq_user = RabbitMqUser(module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node)
+ bulk_permissions = True
+ if permissions == []:
+ perm = {
+ 'vhost': vhost,
+ 'configure_priv': configure_priv,
+ 'write_priv': write_priv,
+ 'read_priv': read_priv
+ }
+ permissions.append(perm)
+ bulk_permissions = False
+
+ rabbitmq_user = RabbitMqUser(module, username, password, tags, permissions,
+ node, bulk_permissions=bulk_permissions)
changed = False
if rabbitmq_user.get():
@@ -250,4 +309,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/messaging/rabbitmq_vhost.py b/messaging/rabbitmq_vhost.py
index dbde32393cb..635d8b77bbe 100644
--- a/messaging/rabbitmq_vhost.py
+++ b/messaging/rabbitmq_vhost.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rabbitmq_vhost
@@ -55,7 +59,9 @@
EXAMPLES = '''
# Ensure that the vhost /test exists.
-- rabbitmq_vhost: name=/test state=present
+- rabbitmq_vhost:
+ name: /test
+ state: present
'''
class RabbitMqVhost(object):
@@ -144,4 +150,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/monitoring/airbrake_deployment.py b/monitoring/airbrake_deployment.py
index a58df024182..124a801ea94 100644
--- a/monitoring/airbrake_deployment.py
+++ b/monitoring/airbrake_deployment.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: airbrake_deployment
@@ -51,7 +55,7 @@
description:
- Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
required: false
- default: "https://airbrake.io/deploys"
+ default: "https://airbrake.io/deploys.txt"
version_added: "1.5"
validate_certs:
description:
@@ -65,10 +69,11 @@
'''
EXAMPLES = '''
-- airbrake_deployment: token=AAAAAA
- environment='staging'
- user='ansible'
- revision=4.2
+- airbrake_deployment:
+ token: AAAAAA
+ environment: staging
+ user: ansible
+ revision: '4.2'
'''
import urllib
@@ -81,7 +86,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- token=dict(required=True),
+ token=dict(required=True, no_log=True),
environment=dict(required=True),
user=dict(required=False),
repo=dict(required=False),
@@ -127,5 +132,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
-main()
-
+if __name__ == '__main__':
+ main()
diff --git a/monitoring/bigpanda.py b/monitoring/bigpanda.py
index 0139f3a598e..90b37841526 100644
--- a/monitoring/bigpanda.py
+++ b/monitoring/bigpanda.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: bigpanda
@@ -79,23 +83,34 @@
'''
EXAMPLES = '''
-- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started
+- bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ state: started
...
-- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=finished
-
-or using a deployment object:
-- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started
- register: deployment
-
-- bigpanda: state=finished
- args: deployment
-
-If outside servers aren't reachable from your machine, use local_action and pass the hostname:
-- local_action: bigpanda component=myapp version=1.3 hosts={{ansible_hostname}} token={{ bigpanda_token }} state=started
+- bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ state: finished
+
+# If outside servers aren't reachable from your machine, use delegate_to and override hosts:
+- bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ hosts: '{{ ansible_hostname }}'
+ state: started
+ delegate_to: localhost
register: deployment
...
-- local_action: bigpanda state=finished
- args: deployment
+- bigpanda:
+ component: '{{ deployment.component }}'
+ version: '{{ deployment.version }}'
+ token: '{{ deployment.token }}'
+ state: finished
+ delegate_to: localhost
'''
# ===========================================
@@ -109,7 +124,7 @@ def main():
argument_spec=dict(
component=dict(required=True, aliases=['name']),
version=dict(required=True),
- token=dict(required=True),
+ token=dict(required=True, no_log=True),
state=dict(required=True, choices=['started', 'finished', 'failed']),
hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']),
env=dict(required=False),
@@ -178,11 +193,13 @@ def main():
module.exit_json(changed=True, **deployment)
else:
module.fail_json(msg=json.dumps(info))
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
+from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
diff --git a/monitoring/boundary_meter.py b/monitoring/boundary_meter.py
index 99cb74f870d..ccbf014026f 100644
--- a/monitoring/boundary_meter.py
+++ b/monitoring/boundary_meter.py
@@ -22,10 +22,9 @@
along with Ansible. If not, see .
"""
-import json
-import datetime
-import base64
-import os
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
DOCUMENTATION = '''
@@ -80,15 +79,33 @@
'''
+import base64
+import os
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
api_host = "api.boundary.com"
config_directory = "/etc/bprobe"
+
# "resource" like thing or apikey?
def auth_encode(apikey):
auth = base64.standard_b64encode(apikey)
auth.replace("\n", "")
return auth
-
+
+
def build_url(name, apiid, action, meter_id=None, cert_type=None):
if action == "create":
return 'https://%s/%s/meters' % (api_host, apiid)
@@ -190,7 +207,7 @@ def delete_meter(module, name, apiid, apikey):
try:
cert_file = '%s/%s.pem' % (config_directory,cert_type)
os.remove(cert_file)
- except OSError, e:
+ except OSError:
module.fail_json("Failed to remove " + cert_type + ".pem file")
return 0, "Meter " + name + " deleted"
@@ -212,8 +229,8 @@ def download_request(module, name, apiid, apikey, cert_type):
cert_file = open(cert_file_path, 'w')
cert_file.write(body)
cert_file.close()
- os.chmod(cert_file_path, 0600)
- except:
+ os.chmod(cert_file_path, int('0600', 8))
+ except (IOError, OSError):
module.fail_json("Could not write to certificate file")
return True
@@ -248,9 +265,7 @@ def main():
module.exit_json(status=result,changed=True)
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
+
if __name__ == '__main__':
main()
diff --git a/monitoring/circonus_annotation.py b/monitoring/circonus_annotation.py
index ae5c98c87a1..5e9029e9fb0 100644
--- a/monitoring/circonus_annotation.py
+++ b/monitoring/circonus_annotation.py
@@ -17,9 +17,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-import requests
-import time
-import json
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -67,25 +68,34 @@
# Create a simple annotation event with a source, defaults to start and end time of now
- circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
- title: 'App Config Change'
- description: 'This is a detailed description of the config change'
- category: 'This category groups like annotations'
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
# Create an annotation with a duration of 5 minutes and a default start time of now
- circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
- title: 'App Config Change'
- description: 'This is a detailed description of the config change'
- category: 'This category groups like annotations'
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
duration: 300
# Create an annotation with a start_time and end_time
- circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
- title: 'App Config Change'
- description: 'This is a detailed description of the config change'
- category: 'This category groups like annotations'
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
start_time: 1395940006
end_time: 1395954407
'''
+import json
+import time
+
+import requests
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+
def post_annotation(annotation, api_key):
''' Takes annotation dict and api_key string'''
base_url = 'https://api.circonus.com/v2'
@@ -95,6 +105,7 @@ def post_annotation(annotation, api_key):
resp.raise_for_status()
return resp
+
def create_annotation(module):
''' Takes ansible module object '''
annotation = {}
@@ -116,6 +127,8 @@ def create_annotation(module):
annotation['description'] = module.params['description']
annotation['title'] = module.params['title']
return annotation
+
+
def build_headers(api_token):
'''Takes api token, returns headers with it included.'''
headers = {'X-Circonus-App-Name': 'ansible',
@@ -123,6 +136,7 @@ def build_headers(api_token):
'Accept': 'application/json'}
return headers
+
def main():
'''Main function, dispatches logic'''
module = AnsibleModule(
@@ -133,15 +147,17 @@ def main():
title=dict(required=True),
description=dict(required=True),
duration=dict(required=False, type='int'),
- api_key=dict(required=True)
+ api_key=dict(required=True, no_log=True)
)
)
annotation = create_annotation(module)
try:
resp = post_annotation(annotation, module.params['api_key'])
- except requests.exceptions.RequestException, err_str:
+ except requests.exceptions.RequestException:
+ err_str = get_exception()
module.fail_json(msg='Request Failed', reason=err_str)
module.exit_json(changed=True, annotation=resp.json())
-from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py
index 25e8ce052b6..4e3bf03b159 100644
--- a/monitoring/datadog_event.py
+++ b/monitoring/datadog_event.py
@@ -2,6 +2,7 @@
# -*- coding: utf-8 -*-
#
# Author: Artūras 'arturaz' Šlajus
+# Author: Naoya Nakazawa
#
# This module is proudly sponsored by iGeolise (www.igeolise.com) and
# Tiny Lab Productions (www.tinylabproductions.com).
@@ -21,6 +22,16 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+# Import Datadog
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except ImportError:
+ HAS_DATADOG = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -30,7 +41,9 @@
- "Allows to post events to DataDog (www.datadoghq.com) service."
- "Uses http://docs.datadoghq.com/api/#events API."
version_added: "1.3"
-author: "Artūras `arturaz` Šlajus (@arturaz)"
+author:
+- "Artūras `arturaz` Šlajus (@arturaz)"
+- "Naoya Nakazawa (@n0ts)"
notes: []
requirements: []
options:
@@ -38,6 +51,10 @@
description: ["Your DataDog API key."]
required: true
default: null
+ app_key:
+ description: ["Your DataDog app key."]
+ required: true
+ version_added: "2.2"
title:
description: ["The event title."]
required: true
@@ -82,20 +99,27 @@
EXAMPLES = '''
# Post an event with low priority
-datadog_event: title="Testing from ansible" text="Test!" priority="low"
- api_key="6873258723457823548234234234"
+- datadog_event:
+ title: Testing from ansible
+ text: Test
+ priority: low
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
# Post an event with several tags
-datadog_event: title="Testing from ansible" text="Test!"
- api_key="6873258723457823548234234234"
- tags=aa,bb,#host:{{ inventory_hostname }}
+- datadog_event:
+ title: Testing from ansible
+ text: Test
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
+ tags: 'aa,bb,#host:{{ inventory_hostname }}'
'''
-import socket
-
+
def main():
module = AnsibleModule(
argument_spec=dict(
- api_key=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ app_key=dict(required=True, no_log=True),
title=dict(required=True),
text=dict(required=True),
date_happened=dict(required=False, default=None, type='int'),
@@ -108,51 +132,42 @@ def main():
choices=['error', 'warning', 'info', 'success']
),
aggregation_key=dict(required=False, default=None),
- source_type_name=dict(
- required=False, default='my apps',
- choices=['nagios', 'hudson', 'jenkins', 'user', 'my apps',
- 'feed', 'chef', 'puppet', 'git', 'bitbucket', 'fabric',
- 'capistrano']
- ),
validate_certs = dict(default='yes', type='bool'),
)
)
- post_event(module)
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg='datadogpy required for this module')
-def post_event(module):
- uri = "https://app.datadoghq.com/api/v1/events?api_key=%s" % module.params['api_key']
+ options = {
+ 'api_key': module.params['api_key'],
+ 'app_key': module.params['app_key']
+ }
- body = dict(
- title=module.params['title'],
- text=module.params['text'],
- priority=module.params['priority'],
- alert_type=module.params['alert_type']
- )
- if module.params['date_happened'] != None:
- body['date_happened'] = module.params['date_happened']
- if module.params['tags'] != None:
- body['tags'] = module.params['tags']
- if module.params['aggregation_key'] != None:
- body['aggregation_key'] = module.params['aggregation_key']
- if module.params['source_type_name'] != None:
- body['source_type_name'] = module.params['source_type_name']
+ initialize(**options)
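+ # datadog.initialize() stores the keys process-wide; the subsequent
+ # datadog.api calls (api.Event.create below) pick them up implicitly.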
+
+ _post_event(module)
+
+
+def _post_event(module):
+ try:
+ msg = api.Event.create(title=module.params['title'],
+ text=module.params['text'],
+ tags=module.params['tags'],
+ priority=module.params['priority'],
+ alert_type=module.params['alert_type'],
+ aggregation_key=module.params['aggregation_key'],
+ source_type_name='ansible')
+ if msg['status'] != 'ok':
+ module.fail_json(msg=msg)
- json_body = module.jsonify(body)
- headers = {"Content-Type": "application/json"}
+ module.exit_json(changed=True, msg=msg)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg=str(e))
- (response, info) = fetch_url(module, uri, data=json_body, headers=headers)
- if info['status'] == 200:
- response_body = response.read()
- response_json = module.from_json(response_body)
- if response_json['status'] == 'ok':
- module.exit_json(changed=True)
- else:
- module.fail_json(msg=response)
- else:
- module.fail_json(**info)
-# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py
index 9853d748c2c..50a067d8a2a 100644
--- a/monitoring/datadog_monitor.py
+++ b/monitoring/datadog_monitor.py
@@ -19,12 +19,9 @@
# along with Ansible. If not, see .
# import module snippets
-# Import Datadog
-try:
- from datadog import initialize, api
- HAS_DATADOG = True
-except:
- HAS_DATADOG = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -34,8 +31,7 @@
- "Manages monitors within Datadog"
- "Options like described on http://docs.datadoghq.com/api/"
version_added: "2.0"
-author: "Sebastian Kornehl (@skornehl)"
-notes: []
+author: "Sebastian Kornehl (@skornehl)"
requirements: [datadog]
options:
api_key:
@@ -48,20 +44,27 @@
description: ["The designated state of the monitor."]
required: true
choices: ['present', 'absent', 'muted', 'unmuted']
+ tags:
+ description: ["A list of tags to associate with your monitor when creating or updating. This can help you categorize and filter monitors."]
+ required: false
+ default: None
+ version_added: "2.2"
type:
- description: ["The type of the monitor."]
+ description:
+ - "The type of the monitor."
+ - The 'event alert' type is available starting at Ansible 2.1.
required: false
default: null
- choices: ['metric alert', 'service check']
+ choices: ['metric alert', 'service check', 'event alert']
query:
- description: ["he monitor query to notify on with syntax varying depending on what type of monitor you are creating."]
+ description: ["The monitor query to notify on with syntax varying depending on what type of monitor you are creating."]
required: false
default: null
name:
description: ["The name of the alert."]
required: true
message:
- description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events."]
+ description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events. Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'."]
required: false
default: null
silenced:
@@ -93,9 +96,24 @@
required: false
default: False
thresholds:
- description: ["A dictionary of thresholds by status. Because service checks can have multiple thresholds, we don't define them directly in the query."]
+ description: ["A dictionary of thresholds by status. This option is only available for service checks and metric alerts. Because each of them can have multiple thresholds, we don't define them directly in the query."]
required: false
default: {'ok': 1, 'critical': 1, 'warning': 1}
+ locked:
+ description: ["A boolean indicating whether changes to this monitor should be restricted to the creator or admins."]
+ required: false
+ default: False
+ version_added: "2.2"
+ require_full_window:
+ description: ["A boolean indicating whether this monitor needs a full window of data before it's evaluated. We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped."]
+ required: false
+ default: null
+ version_added: "2.3"
+ id:
+ description: ["The id of the alert. If set, will be used instead of the name to locate the alert."]
+ required: false
+ default: null
+ version_added: "2.3"
'''
EXAMPLES = '''
@@ -105,7 +123,7 @@
name: "Test monitor"
state: "present"
query: "datadog.agent.up".over("host:host1").last(2).count_by_status()"
- message: "Some message."
+ message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
@@ -132,25 +150,39 @@
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
'''
+# Import Datadog
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except:
+ HAS_DATADOG = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
def main():
module = AnsibleModule(
argument_spec=dict(
- api_key=dict(required=True),
- app_key=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ app_key=dict(required=True, no_log=True),
        state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
- type=dict(required=False, choises=['metric alert', 'service check']),
+        type=dict(required=False, choices=['metric alert', 'service check', 'event alert']),
name=dict(required=True),
query=dict(required=False),
message=dict(required=False, default=None),
silenced=dict(required=False, default=None, type='dict'),
- notify_no_data=dict(required=False, default=False, choices=BOOLEANS),
+ notify_no_data=dict(required=False, default=False, type='bool'),
no_data_timeframe=dict(required=False, default=None),
timeout_h=dict(required=False, default=None),
renotify_interval=dict(required=False, default=None),
escalation_message=dict(required=False, default=None),
- notify_audit=dict(required=False, default=False, choices=BOOLEANS),
- thresholds=dict(required=False, type='dict', default={'ok': 1, 'critical': 1, 'warning': 1}),
+ notify_audit=dict(required=False, default=False, type='bool'),
+ thresholds=dict(required=False, type='dict', default=None),
+ tags=dict(required=False, type='list', default=None),
+ locked=dict(required=False, default=False, type='bool'),
+ require_full_window=dict(required=False, default=None, type='bool'),
+ id=dict(required=False)
)
)
@@ -174,24 +206,40 @@ def main():
elif module.params['state'] == 'unmute':
unmute_monitor(module)
+def _fix_template_vars(message):
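+    # Playbook authors write monitor template variables as '[[' and ']]' so
+    # Jinja2 does not consume them; convert back to Datadog's '{{' '}}'
+    # notation before sending, e.g. "[[host.name]]" -> "{{host.name}}".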
+ if message:
+ return message.replace('[[', '{{').replace(']]', '}}')
+ return message
+
def _get_monitor(module):
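+    # Prefer a lookup by explicit id when one is given; otherwise fall back
+    # to scanning all monitors for a matching name.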
- for monitor in api.Monitor.get_all():
- if monitor['name'] == module.params['name']:
- return monitor
+ if module.params['id'] is not None:
+ monitor = api.Monitor.get(module.params['id'])
+ if 'errors' in monitor:
+ module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors'])))
+ return monitor
+ else:
+ monitors = api.Monitor.get_all()
+ for monitor in monitors:
+ if monitor['name'] == module.params['name']:
+ return monitor
return {}
def _post_monitor(module, options):
try:
- msg = api.Monitor.create(type=module.params['type'], query=module.params['query'],
- name=module.params['name'], message=module.params['message'],
- options=options)
+ kwargs = dict(type=module.params['type'], query=module.params['query'],
+ name=module.params['name'], message=_fix_template_vars(module.params['message']),
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.create(**kwargs)
if 'errors' in msg:
module.fail_json(msg=str(msg['errors']))
else:
module.exit_json(changed=True, msg=msg)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg=str(e))
def _equal_dicts(a, b, ignore_keys):
@@ -201,16 +249,21 @@ def _equal_dicts(a, b, ignore_keys):
def _update_monitor(module, monitor, options):
try:
- msg = api.Monitor.update(id=monitor['id'], query=module.params['query'],
- name=module.params['name'], message=module.params['message'],
- options=options)
+ kwargs = dict(id=monitor['id'], query=module.params['query'],
+ name=module.params['name'], message=_fix_template_vars(module.params['message']),
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.update(**kwargs)
+
if 'errors' in msg:
module.fail_json(msg=str(msg['errors']))
- elif _equal_dicts(msg, monitor, ['creator', 'overall_state']):
+ elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified']):
module.exit_json(changed=False, msg=msg)
else:
module.exit_json(changed=True, msg=msg)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg=str(e))
@@ -223,9 +276,13 @@ def install_monitor(module):
"renotify_interval": module.params['renotify_interval'],
"escalation_message": module.params['escalation_message'],
"notify_audit": module.boolean(module.params['notify_audit']),
+ "locked": module.boolean(module.params['locked']),
+ "require_full_window" : module.params['require_full_window']
}
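+    # Service checks always get thresholds (falling back to the documented
+    # defaults); metric alerts only receive them when explicitly provided.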
if module.params['type'] == "service check":
+ options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
+ if module.params['type'] == "metric alert" and module.params['thresholds'] is not None:
options["thresholds"] = module.params['thresholds']
monitor = _get_monitor(module)
@@ -242,7 +299,8 @@ def delete_monitor(module):
try:
msg = api.Monitor.delete(monitor['id'])
module.exit_json(changed=True, msg=msg)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg=str(e))
@@ -261,7 +319,8 @@ def mute_monitor(module):
else:
msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
module.exit_json(changed=True, msg=msg)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg=str(e))
@@ -274,10 +333,10 @@ def unmute_monitor(module):
try:
msg = api.Monitor.unmute(monitor['id'])
module.exit_json(changed=True, msg=msg)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg=str(e))
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/monitoring/honeybadger_deployment.py b/monitoring/honeybadger_deployment.py
new file mode 100644
index 00000000000..362af67963a
--- /dev/null
+++ b/monitoring/honeybadger_deployment.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Benjamin Curtis
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: honeybadger_deployment
+author: "Benjamin Curtis (@stympy)"
+version_added: "2.2"
+short_description: Notify Honeybadger.io about app deployments
+description:
+ - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking)
+options:
+ token:
+ description:
+ - API token.
+ required: true
+ environment:
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: true
+ user:
+ description:
+ - The username of the person doing the deployment
+ required: false
+ default: None
+ repo:
+ description:
+ - URL of the project repository
+ required: false
+ default: None
+ revision:
+ description:
+ - A hash, number, tag, or other identifier showing what revision was deployed
+ required: false
+ default: None
+ url:
+ description:
+ - Optional URL to submit the notification to.
+ required: false
+ default: "https://api.honeybadger.io/v1/deploys"
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- honeybadger_deployment:
+ token: AAAAAA
+ environment: staging
+ user: ansible
+ revision: b6826b8
+ repo: 'git@github.com:user/repo.git'
+'''
+
+RETURN = '''# '''
+
+import urllib
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import *
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ environment=dict(required=True),
+ user=dict(required=False),
+ repo=dict(required=False),
+ revision=dict(required=False),
+ url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
+ validate_certs=dict(default='yes', type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ params = {}
+
+ if module.params["environment"]:
+ params["deploy[environment]"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["deploy[local_username]"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["deploy[repository]"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["deploy[revision]"] = module.params["revision"]
+
+ params["api_key"] = module.params["token"]
+
+ url = module.params.get('url')
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ data = urllib.urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg='Unable to notify Honeybadger: %s' % e)
+ else:
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+if __name__ == '__main__':
+ main()
+
diff --git a/monitoring/librato_annotation.py b/monitoring/librato_annotation.py
index f174bda0ea4..838abf14e60 100644
--- a/monitoring/librato_annotation.py
+++ b/monitoring/librato_annotation.py
@@ -20,6 +20,10 @@
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: librato_annotation
@@ -77,27 +81,28 @@
- librato_annotation:
user: user@example.com
api_key: XXXXXXXXXXXXXXXXX
- title: 'App Config Change'
- source: 'foo.bar'
- description: 'This is a detailed description of the config change'
+ title: App Config Change
+ source: foo.bar
+ description: This is a detailed description of the config change
# Create an annotation that includes a link
- librato_annotation:
user: user@example.com
api_key: XXXXXXXXXXXXXXXXXX
- name: 'code.deploy'
- title: 'app code deploy'
- description: 'this is a detailed description of a deployment'
+ name: code.deploy
+ title: app code deploy
+ description: this is a detailed description of a deployment
links:
- - { rel: 'example', href: 'http://www.example.com/deploy' }
+ - rel: example
+ href: http://www.example.com/deploy
# Create an annotation with a start_time and end_time
- librato_annotation:
user: user@example.com
api_key: XXXXXXXXXXXXXXXXXX
- name: 'maintenance'
- title: 'Maintenance window'
- description: 'This is a detailed description of maintenance'
+ name: maintenance
+ title: Maintenance window
+ description: This is a detailed description of maintenance
start_time: 1395940006
end_time: 1395954406
'''
diff --git a/monitoring/logentries.py b/monitoring/logentries.py
index a347afd84c2..a85679ef2eb 100644
--- a/monitoring/logentries.py
+++ b/monitoring/logentries.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: logentries
@@ -48,8 +52,16 @@
- Requires the LogEntries agent which can be installed following the instructions at logentries.com
'''
EXAMPLES = '''
-- logentries: path=/var/log/nginx/access.log state=present name=nginx-access-log
-- logentries: path=/var/log/nginx/error.log state=absent
+# Track nginx logs
+- logentries:
+ path: /var/log/nginx/access.log
+ state: present
+ name: nginx-access-log
+
+# Stop tracking nginx logs
+- logentries:
+ path: /var/log/nginx/error.log
+ state: absent
'''
def query_log_status(module, le_path, path, state="present"):
@@ -144,4 +156,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/monitoring/logicmonitor.py b/monitoring/logicmonitor.py
new file mode 100644
index 00000000000..f2267207a71
--- /dev/null
+++ b/monitoring/logicmonitor.py
@@ -0,0 +1,2178 @@
+#!/usr/bin/python
+
+"""LogicMonitor Ansible module for managing Collectors, Hosts and Hostgroups
+ Copyright (C) 2015 LogicMonitor
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA"""
+
+import datetime
+import os
+import platform
+import socket
+import sys
+import types
+import urllib
+
+HAS_LIB_JSON = True
+try:
+ import json
+ # Detect the python-json library which is incompatible
+ # Look for simplejson if that's the case
+ try:
+ if (
+ not isinstance(json.loads, types.FunctionType) or
+ not isinstance(json.dumps, types.FunctionType)
+ ):
+ raise ImportError
+ except AttributeError:
+ raise ImportError
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ print(
+ '\n{"msg": "Error: ansible requires the stdlib json or ' +
+ 'simplejson module, neither was found!", "failed": true}'
+ )
+ HAS_LIB_JSON = False
+ except SyntaxError:
+ print(
+ '\n{"msg": "SyntaxError: probably due to installed simplejson ' +
+ 'being for a different python version", "failed": true}'
+ )
+ HAS_LIB_JSON = False
+
+RETURN = '''
+---
+success:
+ description: flag indicating that execution was successful
+ returned: success
+ type: boolean
+ sample: True
+...
+'''
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: logicmonitor
+short_description: Manage your LogicMonitor account through Ansible Playbooks
+description:
+ - LogicMonitor is a hosted, full-stack, infrastructure monitoring platform.
+ - This module manages hosts, host groups, and collectors within your LogicMonitor account.
+version_added: "2.2"
+author: [Ethan Culler-Mayeno (@ethanculler), Jeff Wozniak (@woz5999)]
+notes:
+ - You must have an existing LogicMonitor account for this module to function.
+requirements: ["An existing LogicMonitor account", "Linux"]
+options:
+ target:
+ description:
+ - The type of LogicMonitor object you wish to manage.
+ - "Collector: Perform actions on a LogicMonitor collector."
+      - "NOTE: You should use Ansible service modules such as M(service) or M(supervisorctl) for managing the Collector 'logicmonitor-agent' and 'logicmonitor-watchdog' services. Specifically, you'll probably want to start these services after a Collector add and stop these services before a Collector remove."
+ - "Host: Perform actions on a host device."
+ - "Hostgroup: Perform actions on a LogicMonitor host group."
+      - "NOTE: Host and Hostgroup tasks should always be performed via local_action. There are no benefits to running these tasks on the remote host and doing so will typically cause problems."
+ required: true
+ default: null
+    choices: ['collector', 'host', 'datasource', 'hostgroup']
+ action:
+ description:
+ - The action you wish to perform on target.
+ - "Add: Add an object to your LogicMonitor account."
+ - "Remove: Remove an object from your LogicMonitor account."
+ - "Update: Update properties, description, or groups (target=host) for an object in your LogicMonitor account."
+ - "SDT: Schedule downtime for an object in your LogicMonitor account."
+ required: true
+ default: null
+ choices: ['add', 'remove', 'update', 'sdt']
+ company:
+ description:
+ - The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes."
+ required: true
+ default: null
+ user:
+ description:
+ - A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user.
+ required: true
+ default: null
+ password:
+ description:
+ - The password of the specified LogicMonitor user
+ required: true
+ default: null
+ collector:
+ description:
+ - The fully qualified domain name of a collector in your LogicMonitor account.
+ - This is required for the creation of a LogicMonitor host (target=host action=add).
+ - This is required for updating, removing or scheduling downtime for hosts if 'displayname' isn't specified (target=host action=update action=remove action=sdt).
+ required: false
+ default: null
+ hostname:
+ description:
+ - The hostname of a host in your LogicMonitor account, or the desired hostname of a device to manage.
+ - Optional for managing hosts (target=host).
+ required: false
+ default: 'hostname -f'
+ displayname:
+ description:
+ - The display name of a host in your LogicMonitor account or the desired display name of a device to manage.
+ - Optional for managing hosts (target=host).
+ required: false
+ default: 'hostname -f'
+ description:
+ description:
+ - The long text description of the object in your LogicMonitor account.
+ - Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update).
+ required: false
+ default: ""
+ properties:
+ description:
+ - A dictionary of properties to set on the LogicMonitor host or host group.
+ - Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update).
+ - This parameter will add or update existing properties in your LogicMonitor account.
+ required: false
+ default: {}
+ groups:
+ description:
+ - A list of groups that the host should be a member of.
+ - Optional for managing hosts (target=host; action=add or action=update).
+ required: false
+ default: []
+ id:
+ description:
+ - ID of the datasource to target.
+ - Required for management of LogicMonitor datasources (target=datasource).
+ required: false
+ default: null
+ fullpath:
+ description:
+ - The fullpath of the host group object you would like to manage.
+ - Recommend running on a single Ansible host.
+ - Required for management of LogicMonitor host groups (target=hostgroup).
+ required: false
+ default: null
+ alertenable:
+ description:
+ - A boolean flag to turn alerting on or off for an object.
+ - Optional for managing all hosts (action=add or action=update).
+ required: false
+ default: true
+ choices: [true, false]
+ starttime:
+ description:
+ - The time that the Scheduled Down Time (SDT) should begin.
+ - Optional for managing SDT (action=sdt).
+      - Format is 'Y-m-d H:M' (e.g. '2017-03-04 05:06').
+ required: false
+ default: Now
+ duration:
+ description:
+ - The duration (minutes) of the Scheduled Down Time (SDT).
+ - Optional for putting an object into SDT (action=sdt).
+ required: false
+ default: 30
+...
+'''
+EXAMPLES = '''
+ # example of adding a new LogicMonitor collector to these devices
+ ---
+ - hosts: collectors
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Deploy/verify LogicMonitor collectors
+ become: yes
+ logicmonitor:
+ target=collector
+ action=add
+ company={{ company }}
+ user={{ user }}
+ password={{ password }}
+
+ #example of adding a list of hosts into monitoring
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Deploy LogicMonitor Host
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=host
+ action=add
+ collector='mycompany-Collector'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ groups="/servers/production,/datacenter1"
+ properties="{'snmp.community':'secret','dc':'1', 'type':'prod'}"
+
+ #example of putting a datasource in SDT
+ ---
+ - hosts: localhost
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: SDT a datasource
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=datasource
+ action=sdt
+ id='123'
+ duration=3000
+ starttime='2017-03-04 05:06'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+
+ #example of creating a hostgroup
+ ---
+ - hosts: localhost
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Create a host group
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=add
+ fullpath='/servers/development'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'commstring', 'type':'dev'}"
+
+ #example of putting a list of hosts into SDT
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: SDT hosts
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=host
+ action=sdt
+ duration=3000
+ starttime='2016-11-10 09:08'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ collector='mycompany-Collector'
+
+ #example of putting a host group in SDT
+ ---
+ - hosts: localhost
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: SDT a host group
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=sdt
+ fullpath='/servers/development'
+ duration=3000
+ starttime='2017-03-04 05:06'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+
+ #example of updating a list of hosts
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Update a list of hosts
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=host
+ action=update
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ collector='mycompany-Collector'
+ groups="/servers/production,/datacenter5"
+ properties="{'snmp.community':'commstring','dc':'5'}"
+
+ #example of updating a hostgroup
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Update a host group
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=update
+ fullpath='/servers/development'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'hg', 'type':'dev', 'status':'test'}"
+
+ #example of removing a list of hosts from monitoring
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Remove LogicMonitor hosts
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=host
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ collector='mycompany-Collector'
+
+ #example of removing a host group
+ ---
+ - hosts: hosts
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Remove LogicMonitor development servers hostgroup
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ fullpath='/servers/development'
+ - name: Remove LogicMonitor servers hostgroup
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ fullpath='/servers'
+ - name: Remove LogicMonitor datacenter1 hostgroup
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ fullpath='/datacenter1'
+ - name: Remove LogicMonitor datacenter5 hostgroup
+ # All tasks except for target=collector should use local_action
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=remove
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ fullpath='/datacenter5'
+
+    # example of removing LogicMonitor collectors from these devices
+ ---
+ - hosts: collectors
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Remove LogicMonitor collectors
+ become: yes
+ logicmonitor:
+ target=collector
+ action=remove
+ company={{ company }}
+ user={{ user }}
+ password={{ password }}
+
+ #complete example
+ ---
+ - hosts: localhost
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Create a host group
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=add
+ fullpath='/servers/production/database'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'commstring'}"
+ - name: SDT a host group
+ local_action: >
+ logicmonitor
+ target=hostgroup
+ action=sdt
+ fullpath='/servers/production/web'
+ duration=3000
+ starttime='2012-03-04 05:06'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+
+ - hosts: collectors
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: Deploy/verify LogicMonitor collectors
+ logicmonitor:
+ target: collector
+ action: add
+          company: '{{ company }}'
+          user: '{{ user }}'
+          password: '{{ password }}'
+ - name: Place LogicMonitor collectors into 30 minute Scheduled downtime
+ logicmonitor: target=collector action=sdt company={{ company }}
+ user={{ user }} password={{ password }}
+ - name: Deploy LogicMonitor Host
+ local_action: >
+ logicmonitor
+ target=host
+ action=add
+ collector=agent1.ethandev.com
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'commstring', 'dc':'1'}"
+ groups="/servers/production/collectors, /datacenter1"
+
+ - hosts: database-servers
+ remote_user: '{{ username }}'
+ vars:
+ company: 'mycompany'
+ user: 'myusername'
+ password: 'mypassword'
+ tasks:
+ - name: deploy logicmonitor hosts
+ local_action: >
+ logicmonitor
+ target=host
+ action=add
+ collector=monitoring.dev.com
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+ properties="{'snmp.community':'commstring', 'type':'db', 'dc':'1'}"
+ groups="/servers/production/database, /datacenter1"
+ - name: schedule 5 hour downtime for 2012-11-10 09:08
+ local_action: >
+ logicmonitor
+ target=host
+ action=sdt
+ duration=3000
+ starttime='2012-11-10 09:08'
+ company='{{ company }}'
+ user='{{ user }}'
+ password='{{ password }}'
+'''
+
+
+class LogicMonitor(object):
+
+ def __init__(self, module, **params):
+ self.__version__ = "1.0-python"
+ self.module = module
+ self.module.debug("Instantiating LogicMonitor object")
+
+ self.check_mode = False
+ self.company = params["company"]
+ self.user = params["user"]
+ self.password = params["password"]
+ self.fqdn = socket.getfqdn()
+ self.lm_url = "logicmonitor.com/santaba"
+ self.__version__ = self.__version__ + "-ansible-module"
+
+ def rpc(self, action, params):
+ """Make a call to the LogicMonitor RPC library
+ and return the response"""
+ self.module.debug("Running LogicMonitor.rpc")
+
+ param_str = urllib.urlencode(params)
+ creds = urllib.urlencode(
+ {"c": self.company,
+ "u": self.user,
+ "p": self.password})
+
+ if param_str:
+ param_str = param_str + "&"
+
+ param_str = param_str + creds
+
+ try:
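+            # Illustrative request URL (credentials appended as query
+            # parameters):
+            #   https://<company>.logicmonitor.com/santaba/rpc/<action>?<params>&c=...&u=...&p=...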
+ url = ("https://" + self.company + "." + self.lm_url +
+ "/rpc/" + action + "?" + param_str)
+
+ # Set custom LogicMonitor header with version
+ headers = {"X-LM-User-Agent": self.__version__}
+
+ # Set headers
+ f = open_url(url, headers=headers)
+
+ raw = f.read()
+ resp = json.loads(raw)
+ if resp["status"] == 403:
+ self.module.debug("Authentication failed.")
+ self.fail(msg="Error: " + resp["errmsg"])
+ else:
+ return raw
+ except IOError:
+ ioe = get_exception()
+ self.fail(msg="Error: Exception making RPC call to " +
+ "https://" + self.company + "." + self.lm_url +
+ "/rpc/" + action + "\nException" + str(ioe))
+
+ def do(self, action, params):
+ """Make a call to the LogicMonitor
+ server \"do\" function"""
+ self.module.debug("Running LogicMonitor.do...")
+
+ param_str = urllib.urlencode(params)
+ creds = (urllib.urlencode(
+ {"c": self.company,
+ "u": self.user,
+ "p": self.password}))
+
+ if param_str:
+ param_str = param_str + "&"
+ param_str = param_str + creds
+
+ try:
+ self.module.debug("Attempting to open URL: " +
+ "https://" + self.company + "." + self.lm_url +
+ "/do/" + action + "?" + param_str)
+ f = open_url(
+ "https://" + self.company + "." + self.lm_url +
+ "/do/" + action + "?" + param_str)
+ return f.read()
+ except IOError:
+ ioe = get_exception()
+ self.fail(msg="Error: Exception making RPC call to " +
+ "https://" + self.company + "." + self.lm_url +
+ "/do/" + action + "\nException" + str(ioe))
+
+ def get_collectors(self):
+ """Returns a JSON object containing a list of
+ LogicMonitor collectors"""
+ self.module.debug("Running LogicMonitor.get_collectors...")
+
+ self.module.debug("Making RPC call to 'getAgents'")
+ resp = self.rpc("getAgents", {})
+ resp_json = json.loads(resp)
+
+ if resp_json["status"] is 200:
+ self.module.debug("RPC call succeeded")
+ return resp_json["data"]
+ else:
+ self.fail(msg=resp)
+
+ def get_host_by_hostname(self, hostname, collector):
+ """Returns a host object for the host matching the
+ specified hostname"""
+ self.module.debug("Running LogicMonitor.get_host_by_hostname...")
+
+ self.module.debug("Looking for hostname " + hostname)
+ self.module.debug("Making RPC call to 'getHosts'")
+ hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1}))
+
+ if collector:
+ if hostlist_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ hosts = hostlist_json["data"]["hosts"]
+
+ self.module.debug(
+ "Looking for host matching: hostname " + hostname +
+ " and collector " + str(collector["id"]))
+
+ for host in hosts:
+ if (host["hostName"] == hostname and
+ host["agentId"] == collector["id"]):
+
+ self.module.debug("Host match found")
+ return host
+ self.module.debug("No host match found")
+ return None
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(hostlist_json)
+ else:
+ self.module.debug("No collector specified")
+ return None
+
+ def get_host_by_displayname(self, displayname):
+ """Returns a host object for the host matching the
+ specified display name"""
+ self.module.debug("Running LogicMonitor.get_host_by_displayname...")
+
+ self.module.debug("Looking for displayname " + displayname)
+ self.module.debug("Making RPC call to 'getHost'")
+ host_json = (json.loads(self.rpc("getHost",
+ {"displayName": displayname})))
+
+ if host_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return host_json["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(host_json)
+ return None
+
+ def get_collector_by_description(self, description):
+ """Returns a JSON collector object for the collector
+ matching the specified FQDN (description)"""
+ self.module.debug(
+ "Running LogicMonitor.get_collector_by_description..."
+ )
+
+ collector_list = self.get_collectors()
+ if collector_list is not None:
+ self.module.debug("Looking for collector with description {0}" +
+ description)
+ for collector in collector_list:
+ if collector["description"] == description:
+ self.module.debug("Collector match found")
+ return collector
+ self.module.debug("No collector match found")
+ return None
+
+ def get_group(self, fullpath):
+ """Returns a JSON group object for the group matching the
+ specified path"""
+ self.module.debug("Running LogicMonitor.get_group...")
+
+ self.module.debug("Making RPC call to getHostGroups")
+ resp = json.loads(self.rpc("getHostGroups", {}))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC called succeeded")
+ groups = resp["data"]
+
+ self.module.debug("Looking for group matching " + fullpath)
+ for group in groups:
+ if group["fullPath"] == fullpath.lstrip('/'):
+ self.module.debug("Group match found")
+ return group
+
+ self.module.debug("No group match found")
+ return None
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(resp)
+
+ return None
+
+ def create_group(self, fullpath):
+ """Recursively create a path of host groups.
+ Returns the id of the newly created hostgroup"""
+ self.module.debug("Running LogicMonitor.create_group...")
+
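+        # e.g. create_group("/servers/production") first ensures "/servers"
+        # exists (creating it if needed), then creates "production" under it
+        # via the recursive call below.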
+ res = self.get_group(fullpath)
+ if res:
+ self.module.debug("Group {0} exists." + fullpath)
+ return res["id"]
+
+ if fullpath == "/":
+ self.module.debug("Specified group is root. Doing nothing.")
+ return 1
+ else:
+ self.module.debug("Creating group named " + fullpath)
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ parentpath, name = fullpath.rsplit('/', 1)
+ parentgroup = self.get_group(parentpath)
+
+ parentid = 1
+
+ if parentpath == "":
+ parentid = 1
+ elif parentgroup:
+ parentid = parentgroup["id"]
+ else:
+ parentid = self.create_group(parentpath)
+
+ h = None
+
+ # Determine if we're creating a group from host or hostgroup class
+ if hasattr(self, '_build_host_group_hash'):
+ h = self._build_host_group_hash(
+ fullpath,
+ self.description,
+ self.properties,
+ self.alertenable)
+ h["name"] = name
+ h["parentId"] = parentid
+ else:
+ h = {"name": name,
+ "parentId": parentid,
+ "alertEnable": True,
+ "description": ""}
+
+ self.module.debug("Making RPC call to 'addHostGroup'")
+ resp = json.loads(
+ self.rpc("addHostGroup", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]["id"]
+ elif resp["errmsg"] == "The record already exists":
+ self.module.debug("The hostgroup already exists")
+ group = self.get_group(fullpath)
+ return group["id"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(
+ msg="Error: unable to create new hostgroup \"" +
+ name + "\".\n" + resp["errmsg"])
+
+ def fail(self, msg):
+ self.module.fail_json(msg=msg, changed=self.change, failed=True)
+
+ def exit(self, changed):
+ self.module.debug("Changed: " + changed)
+ self.module.exit_json(changed=changed, success=True)
+
+ def output_info(self, info):
+ self.module.debug("Registering properties as Ansible facts")
+ self.module.exit_json(changed=False, ansible_facts=info)
+
+
+class Collector(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor Collector object"""
+ self.change = False
+ self.params = params
+
+ LogicMonitor.__init__(self, module, **params)
+ self.module.debug("Instantiating Collector object")
+
+ if self.params['description']:
+ self.description = self.params['description']
+ else:
+ self.description = self.fqdn
+
+ self.info = self._get()
+ self.installdir = "/usr/local/logicmonitor"
+ self.platform = platform.system()
+ self.is_64bits = sys.maxsize > 2**32
+ self.duration = self.params['duration']
+ self.starttime = self.params['starttime']
+
+ if self.info is None:
+ self.id = None
+ else:
+ self.id = self.info["id"]
+
+ def create(self):
+ """Idempotent function to make sure that there is
+ a running collector installed and registered"""
+ self.module.debug("Running Collector.create...")
+
+ self._create()
+ self.get_installer_binary()
+ self.install()
+
+ def remove(self):
+ """Idempotent function to make sure that there is
+ not a running collector installed and registered"""
+ self.module.debug("Running Collector.destroy...")
+
+        self._unregister()
+ self.uninstall()
+
+ def get_installer_binary(self):
+ """Download the LogicMonitor collector installer binary"""
+ self.module.debug("Running Collector.get_installer_binary...")
+
+ arch = 32
+
+ if self.is_64bits:
+ self.module.debug("64 bit system")
+ arch = 64
+ else:
+ self.module.debug("32 bit system")
+
+ if self.platform == "Linux" and self.id is not None:
+ self.module.debug("Platform is Linux")
+ self.module.debug("Agent ID is " + str(self.id))
+
+ installfilepath = (self.installdir +
+ "/logicmonitorsetup" +
+ str(self.id) + "_" + str(arch) +
+ ".bin")
+
+ self.module.debug("Looking for existing installer at " +
+ installfilepath)
+ if not os.path.isfile(installfilepath):
+ self.module.debug("No previous installer found")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Downloading installer file")
+ # attempt to create the install dir before download
+ self.module.run_command("mkdir " + self.installdir)
+
+ try:
+ f = open(installfilepath, "w")
+ installer = (self.do("logicmonitorsetup",
+ {"id": self.id,
+ "arch": arch}))
+ f.write(installer)
+                    f.close()
+                except Exception:
+                    self.fail(msg="Unable to open installer file for writing")
+ else:
+ self.module.debug("Collector installer already exists")
+ return installfilepath
+
+ elif self.id is None:
+ self.fail(
+ msg="Error: There is currently no collector " +
+ "associated with this device. To download " +
+ " the installer, first create a collector " +
+ "for this device.")
+ elif self.platform != "Linux":
+ self.fail(
+ msg="Error: LogicMonitor Collector must be " +
+ "installed on a Linux device.")
+ else:
+ self.fail(
+ msg="Error: Unable to retrieve the installer from the server")
+
+ def install(self):
+ """Execute the LogicMonitor installer if not
+ already installed"""
+ self.module.debug("Running Collector.install...")
+
+ if self.platform == "Linux":
+ self.module.debug("Platform is Linux")
+
+ installer = self.get_installer_binary()
+
+ if self.info is None:
+ self.module.debug("Retriving collector information")
+ self.info = self._get()
+
+ if not os.path.exists(self.installdir + "/agent"):
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Setting installer file permissions")
+ os.chmod(installer, 484) # decimal for 0o744
+
+ self.module.debug("Executing installer")
+ ret_code, out, err = self.module.run_command(installer + " -y")
+
+ if ret_code != 0:
+ self.fail(msg="Error: Unable to install collector: " + err)
+ else:
+ self.module.debug("Collector installed successfully")
+ else:
+ self.module.debug("Collector already installed")
+ else:
+ self.fail(
+ msg="Error: LogicMonitor Collector must be " +
+ "installed on a Linux device")
+
+ def uninstall(self):
+ """Uninstall LogicMontitor collector from the system"""
+ self.module.debug("Running Collector.uninstall...")
+
+ uninstallfile = self.installdir + "/agent/bin/uninstall.pl"
+
+ if os.path.isfile(uninstallfile):
+ self.module.debug("Collector uninstall file exists")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Running collector uninstaller")
+ ret_code, out, err = self.module.run_command(uninstallfile)
+
+ if ret_code != 0:
+ self.fail(
+ msg="Error: Unable to uninstall collector: " + err)
+ else:
+ self.module.debug("Collector successfully uninstalled")
+ else:
+ if os.path.exists(self.installdir + "/agent"):
+ (self.fail(
+ msg="Unable to uninstall LogicMonitor " +
+ "Collector. Can not find LogicMonitor " +
+ "uninstaller."))
+
+ def sdt(self):
+ """Create a scheduled down time
+ (maintenance window) for this host"""
+ self.module.debug("Running Collector.sdt...")
+
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ duration = self.duration
+ starttime = self.starttime
+ offsetstart = starttime
+
+ if starttime:
+ self.module.debug("Start time specified")
+ start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
+ offsetstart = start
+ else:
+ self.module.debug("No start time specified. Using default.")
+ start = datetime.datetime.utcnow()
+
+ # Use user UTC offset
+ self.module.debug("Making RPC call to 'getTimeZoneSetting'")
+ accountresp = json.loads(self.rpc("getTimeZoneSetting", {}))
+
+ if accountresp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ offset = accountresp["data"]["offset"]
+ offsetstart = start + datetime.timedelta(0, offset)
+ else:
+ self.fail(msg="Error: Unable to retrieve timezone offset")
+
+ offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+
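+        # The setAgentSDT RPC appears to use zero-based months, hence the
+        # month - 1 fields below.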
+ h = {"agentId": self.id,
+ "type": 1,
+ "notifyCC": True,
+ "year": offsetstart.year,
+ "month": offsetstart.month-1,
+ "day": offsetstart.day,
+ "hour": offsetstart.hour,
+ "minute": offsetstart.minute,
+ "endYear": offsetend.year,
+ "endMonth": offsetend.month-1,
+ "endDay": offsetend.day,
+ "endHour": offsetend.hour,
+ "endMinute": offsetend.minute}
+
+ self.module.debug("Making RPC call to 'setAgentSDT'")
+ resp = json.loads(self.rpc("setAgentSDT", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=resp["errmsg"])
+
+ def site_facts(self):
+ """Output current properties information for the Collector"""
+ self.module.debug("Running Collector.site_facts...")
+
+ if self.info:
+ self.module.debug("Collector exists")
+ props = self.get_properties(True)
+
+ self.output_info(props)
+ else:
+ self.fail(msg="Error: Collector doesn't exit.")
+
+ def _get(self):
+ """Returns a JSON object representing this collector"""
+ self.module.debug("Running Collector._get...")
+ collector_list = self.get_collectors()
+
+ if collector_list is not None:
+ self.module.debug("Collectors returned")
+ for collector in collector_list:
+ if collector["description"] == self.description:
+ return collector
+ else:
+ self.module.debug("No collectors returned")
+ return None
+
+ def _create(self):
+ """Create a new collector in the associated
+ LogicMonitor account"""
+ self.module.debug("Running Collector._create...")
+
+ if self.platform == "Linux":
+ self.module.debug("Platform is Linux")
+ ret = self.info or self._get()
+
+ if ret is None:
+ self.change = True
+ self.module.debug("System changed")
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ h = {"autogen": True,
+ "description": self.description}
+
+ self.module.debug("Making RPC call to 'addAgent'")
+ create = (json.loads(self.rpc("addAgent", h)))
+
+ if create["status"] is 200:
+ self.module.debug("RPC call succeeded")
+ self.info = create["data"]
+ self.id = create["data"]["id"]
+ return create["data"]
+ else:
+ self.fail(msg=create["errmsg"])
+ else:
+ self.info = ret
+ self.id = ret["id"]
+ return ret
+ else:
+ self.fail(
+ msg="Error: LogicMonitor Collector must be " +
+ "installed on a Linux device.")
+
+    def _unregister(self):
+ """Delete this collector from the associated
+ LogicMonitor account"""
+ self.module.debug("Running Collector._unreigster...")
+
+ if self.info is None:
+ self.module.debug("Retrieving collector information")
+ self.info = self._get()
+
+ if self.info is not None:
+ self.module.debug("Collector found")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Making RPC call to 'deleteAgent'")
+ delete = json.loads(self.rpc("deleteAgent",
+ {"id": self.id}))
+
+ if delete["status"] is 200:
+ self.module.debug("RPC call succeeded")
+ return delete
+ else:
+ # The collector couldn't unregister. Start the service again
+ self.module.debug("Error unregistering collecting. " +
+ delete["errmsg"])
+ self.fail(msg=delete["errmsg"])
+ else:
+ self.module.debug("Collector not found")
+ return None
+
+
+class Host(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor host object"""
+ self.change = False
+ self.params = params
+ self.collector = None
+
+ LogicMonitor.__init__(self, module, **self.params)
+ self.module.debug("Instantiating Host object")
+
+ if self.params["hostname"]:
+ self.module.debug("Hostname is " + self.params["hostname"])
+ self.hostname = self.params['hostname']
+ else:
+ self.module.debug("No hostname specified. Using " + self.fqdn)
+ self.hostname = self.fqdn
+
+ if self.params["displayname"]:
+ self.module.debug("Display name is " + self.params["displayname"])
+ self.displayname = self.params['displayname']
+ else:
+ self.module.debug("No display name specified. Using " + self.fqdn)
+ self.displayname = self.fqdn
+
+        # Attempt to find host information via display name or hostname
+ self.module.debug("Attempting to find host by displayname " +
+ self.displayname)
+ info = self.get_host_by_displayname(self.displayname)
+
+ if info is not None:
+ self.module.debug("Host found by displayname")
+            # Use the host information to grab the collector description
+ # if not provided
+ if (not hasattr(self.params, "collector") and
+ "agentDescription" in info):
+ self.module.debug("Setting collector from host response. " +
+ "Collector " + info["agentDescription"])
+ self.params["collector"] = info["agentDescription"]
+ else:
+ self.module.debug("Host not found by displayname")
+
+ # At this point, a valid collector description is required for success
+ # Check that the description exists or fail
+ if self.params["collector"]:
+ self.module.debug(
+ "Collector specified is " +
+ self.params["collector"]
+ )
+ self.collector = (self.get_collector_by_description(
+ self.params["collector"]))
+ else:
+ self.fail(msg="No collector specified.")
+
+ # If the host wasn't found via displayname, attempt by hostname
+ if info is None:
+ self.module.debug("Attempting to find host by hostname " +
+ self.hostname)
+ info = self.get_host_by_hostname(self.hostname, self.collector)
+
+ self.info = info
+ self.properties = self.params["properties"]
+ self.description = self.params["description"]
+ self.starttime = self.params["starttime"]
+ self.duration = self.params["duration"]
+ self.alertenable = self.params["alertenable"]
+ if self.params["groups"] is not None:
+ self.groups = self._strip_groups(self.params["groups"])
+ else:
+ self.groups = None
+
+ def create(self):
+ """Idemopotent function to create if missing,
+ update if changed, or skip"""
+ self.module.debug("Running Host.create...")
+
+ self.update()
+
+ def get_properties(self):
+ """Returns a hash of the properties
+ associated with this LogicMonitor host"""
+ self.module.debug("Running Host.get_properties...")
+
+ if self.info:
+ self.module.debug("Making RPC call to 'getHostProperties'")
+ properties_json = (json.loads(self.rpc("getHostProperties",
+ {'hostId': self.info["id"],
+ "filterSystemProperties": True})))
+
+ if properties_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return properties_json["data"]
+ else:
+ self.module.debug("Error: there was an issue retrieving the " +
+ "host properties")
+ self.module.debug(properties_json["errmsg"])
+
+ self.fail(msg=properties_json["status"])
+ else:
+ self.module.debug(
+ "Unable to find LogicMonitor host which matches " +
+ self.displayname + " (" + self.hostname + ")"
+ )
+ return None
+
+ def set_properties(self, propertyhash):
+ """update the host to have the properties
+ contained in the property hash"""
+ self.module.debug("Running Host.set_properties...")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Assigning property hash to host object")
+ self.properties = propertyhash
+
+ def add(self):
+ """Add this device to monitoring
+ in your LogicMonitor account"""
+ self.module.debug("Running Host.add...")
+
+ if self.collector and not self.info:
+ self.module.debug("Host not registered. Registering.")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ h = self._build_host_hash(
+ self.hostname,
+ self.displayname,
+ self.collector,
+ self.description,
+ self.groups,
+ self.properties,
+ self.alertenable)
+
+ self.module.debug("Making RPC call to 'addHost'")
+ resp = json.loads(self.rpc("addHost", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(resp)
+ return resp["errmsg"]
+ elif self.collector is None:
+ self.fail(msg="Specified collector doesn't exist")
+ else:
+ self.module.debug("Host already registered")
+
+ def update(self):
+ """This method takes changes made to this host
+ and applies them to the corresponding host
+ in your LogicMonitor account."""
+ self.module.debug("Running Host.update...")
+
+ if self.info:
+ self.module.debug("Host already registed")
+ if self.is_changed():
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ h = (self._build_host_hash(
+ self.hostname,
+ self.displayname,
+ self.collector,
+ self.description,
+ self.groups,
+ self.properties,
+ self.alertenable))
+ h["id"] = self.info["id"]
+ h["opType"] = "replace"
+
+ self.module.debug("Making RPC call to 'updateHost'")
+ resp = json.loads(self.rpc("updateHost", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg="Error: unable to update the host.")
+ else:
+ self.module.debug(
+ "Host properties match supplied properties. " +
+ "No changes to make."
+ )
+ return self.info
+ else:
+ self.module.debug("Host not registed. Registering")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ return self.add()
+
+ def remove(self):
+ """Remove this host from your LogicMonitor account"""
+ self.module.debug("Running Host.remove...")
+
+ if self.info:
+ self.module.debug("Host registered")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Making RPC call to 'deleteHost'")
+ resp = json.loads(self.rpc("deleteHost",
+ {"hostId": self.info["id"],
+ "deleteFromSystem": True,
+ "hostGroupId": 1}))
+
+ if resp["status"] == 200:
+ self.module.debug(resp)
+ self.module.debug("RPC call succeeded")
+ return resp
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(resp)
+ self.fail(msg=resp["errmsg"])
+
+ else:
+ self.module.debug("Host not registered")
+
+ def is_changed(self):
+ """Return true if the host doesn't
+ match the LogicMonitor account"""
+ self.module.debug("Running Host.is_changed")
+
+ ignore = ['system.categories', 'snmp.version']
+
+ hostresp = self.get_host_by_displayname(self.displayname)
+
+ if hostresp is None:
+ hostresp = self.get_host_by_hostname(self.hostname, self.collector)
+
+ if hostresp:
+ self.module.debug("Comparing simple host properties")
+ if hostresp["alertEnable"] != self.alertenable:
+ return True
+
+ if hostresp["description"] != self.description:
+ return True
+
+ if hostresp["displayedAs"] != self.displayname:
+ return True
+
+ if (self.collector and
+ hasattr(self.collector, "id") and
+ hostresp["agentId"] != self.collector["id"]):
+ return True
+
+ self.module.debug("Comparing groups.")
+ if self._compare_groups(hostresp) is True:
+ return True
+
+ propresp = self.get_properties()
+
+ if propresp:
+ self.module.debug("Comparing properties.")
+ if self._compare_props(propresp, ignore) is True:
+ return True
+ else:
+ self.fail(
+ msg="Error: Unknown error retrieving host properties")
+
+ return False
+ else:
+ self.fail(msg="Error: Unknown error retrieving host information")
+
+ def sdt(self):
+ """Create a scheduled down time
+ (maintenance window) for this host"""
+ self.module.debug("Running Host.sdt...")
+ if self.info:
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ duration = self.duration
+ starttime = self.starttime
+            offsetstart = starttime
+
+ if starttime:
+ self.module.debug("Start time specified")
+ start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
+ offsetstart = start
+ else:
+ self.module.debug("No start time specified. Using default.")
+ start = datetime.datetime.utcnow()
+
+ # Use user UTC offset
+ self.module.debug("Making RPC call to 'getTimeZoneSetting'")
+ accountresp = (json.loads(self.rpc("getTimeZoneSetting", {})))
+
+ if accountresp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ offset = accountresp["data"]["offset"]
+ offsetstart = start + datetime.timedelta(0, offset)
+ else:
+ self.fail(
+ msg="Error: Unable to retrieve timezone offset")
+
+ offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+
+ h = {"hostId": self.info["id"],
+ "type": 1,
+ "year": offsetstart.year,
+ "month": offsetstart.month - 1,
+ "day": offsetstart.day,
+ "hour": offsetstart.hour,
+ "minute": offsetstart.minute,
+ "endYear": offsetend.year,
+ "endMonth": offsetend.month - 1,
+ "endDay": offsetend.day,
+ "endHour": offsetend.hour,
+ "endMinute": offsetend.minute}
+
+ self.module.debug("Making RPC call to 'setHostSDT'")
+ resp = (json.loads(self.rpc("setHostSDT", h)))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=resp["errmsg"])
+ else:
+ self.fail(msg="Error: Host doesn't exit.")
+
+ def site_facts(self):
+ """Output current properties information for the Host"""
+ self.module.debug("Running Host.site_facts...")
+
+ if self.info:
+ self.module.debug("Host exists")
+ props = self.get_properties()
+
+ self.output_info(props)
+ else:
+ self.fail(msg="Error: Host doesn't exit.")
+
+ def _build_host_hash(self,
+ hostname,
+ displayname,
+ collector,
+ description,
+ groups,
+ properties,
+ alertenable):
+ """Return a property formated hash for the
+ creation of a host using the rpc function"""
+ self.module.debug("Running Host._build_host_hash...")
+
+ h = {}
+ h["hostName"] = hostname
+ h["displayedAs"] = displayname
+ h["alertEnable"] = alertenable
+
+ if collector:
+ self.module.debug("Collector property exists")
+ h["agentId"] = collector["id"]
+ else:
+ self.fail(
+ msg="Error: No collector found. Unable to build host hash.")
+
+ if description:
+ h["description"] = description
+
+        if groups is not None and groups != []:
+ self.module.debug("Group property exists")
+ groupids = ""
+
+ for group in groups:
+ groupids = groupids + str(self.create_group(group)) + ","
+
+ h["hostGroupIds"] = groupids.rstrip(',')
+
+        if properties is not None and properties != {}:
+ self.module.debug("Properties hash exists")
+ propnum = 0
+ for key, value in properties.iteritems():
+ h["propName" + str(propnum)] = key
+ h["propValue" + str(propnum)] = value
+ propnum = propnum + 1
+
+ return h
+
+ def _verify_property(self, propname):
+ """Check with LogicMonitor server to
+ verify property is unchanged"""
+ self.module.debug("Running Host._verify_property...")
+
+ if self.info:
+ self.module.debug("Host is registered")
+ if propname not in self.properties:
+ self.module.debug("Property " + propname + " does not exist")
+ return False
+ else:
+ self.module.debug("Property " + propname + " exists")
+ h = {"hostId": self.info["id"],
+ "propName0": propname,
+ "propValue0": self.properties[propname]}
+
+ self.module.debug("Making RCP call to 'verifyProperties'")
+ resp = json.loads(self.rpc('verifyProperties', h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]["match"]
+ else:
+ self.fail(
+ msg="Error: unable to get verification " +
+ "from server.\n%s" % resp["errmsg"])
+ else:
+ self.fail(
+ msg="Error: Host doesn't exist. Unable to verify properties")
+
+ def _compare_groups(self, hostresp):
+ """Function to compare the host's current
+ groups against provided groups"""
+ self.module.debug("Running Host._compare_groups")
+
+ g = []
+ fullpathinids = hostresp["fullPathInIds"]
+ self.module.debug("Building list of groups")
+ for path in fullpathinids:
+ if path != []:
+ h = {'hostGroupId': path[-1]}
+
+ hgresp = json.loads(self.rpc("getHostGroup", h))
+
+ if (hgresp["status"] == 200 and
+ hgresp["data"]["appliesTo"] == ""):
+
+ g.append(path[-1])
+
+ if self.groups is not None:
+ self.module.debug("Comparing group lists")
+ for group in self.groups:
+ groupjson = self.get_group(group)
+
+ if groupjson is None:
+ self.module.debug("Group mismatch. No result.")
+ return True
+ elif groupjson['id'] not in g:
+ self.module.debug("Group mismatch. ID doesn't exist.")
+ return True
+ else:
+ g.remove(groupjson['id'])
+
+ if g != []:
+ self.module.debug("Group mismatch. New ID exists.")
+ return True
+ self.module.debug("Groups match")
+
+ def _compare_props(self, propresp, ignore):
+ """Function to compare the host's current
+ properties against provided properties"""
+ self.module.debug("Running Host._compare_props...")
+ p = {}
+
+ self.module.debug("Creating list of properties")
+ for prop in propresp:
+ if prop["name"] not in ignore:
+ if ("*******" in prop["value"] and
+ self._verify_property(prop["name"])):
+ p[prop["name"]] = self.properties[prop["name"]]
+ else:
+ p[prop["name"]] = prop["value"]
+
+ self.module.debug("Comparing properties")
+ # Iterate provided properties and compare to received properties
+ for prop in self.properties:
+ if (prop not in p or
+ p[prop] != self.properties[prop]):
+ self.module.debug("Properties mismatch")
+ return True
+ self.module.debug("Properties match")
+
+ def _strip_groups(self, groups):
+ """Function to strip whitespace from group list.
+ This function provides the user some flexibility when
+ formatting group arguments """
+ self.module.debug("Running Host._strip_groups...")
+ return map(lambda x: x.strip(), groups)
+
+
+class Datasource(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor Datasource object"""
+ self.change = False
+ self.params = params
+
+ LogicMonitor.__init__(self, module, **params)
+ self.module.debug("Instantiating Datasource object")
+
+ self.id = self.params["id"]
+ self.starttime = self.params["starttime"]
+ self.duration = self.params["duration"]
+
+ def sdt(self):
+ """Create a scheduled down time
+ (maintenance window) for this host"""
+ self.module.debug("Running Datasource.sdt...")
+
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ duration = self.duration
+ starttime = self.starttime
+ offsetstart = starttime
+
+ if starttime:
+ self.module.debug("Start time specified")
+ start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
+ offsetstart = start
+ else:
+ self.module.debug("No start time specified. Using default.")
+ start = datetime.datetime.utcnow()
+
+ # Use user UTC offset
+ self.module.debug("Making RPC call to 'getTimeZoneSetting'")
+ accountresp = json.loads(self.rpc("getTimeZoneSetting", {}))
+
+ if accountresp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ offset = accountresp["data"]["offset"]
+ offsetstart = start + datetime.timedelta(0, offset)
+ else:
+ self.fail(msg="Error: Unable to retrieve timezone offset")
+
+ offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+
+ h = {"hostDataSourceId": self.id,
+ "type": 1,
+ "notifyCC": True,
+ "year": offsetstart.year,
+ "month": offsetstart.month-1,
+ "day": offsetstart.day,
+ "hour": offsetstart.hour,
+ "minute": offsetstart.minute,
+ "endYear": offsetend.year,
+ "endMonth": offsetend.month-1,
+ "endDay": offsetend.day,
+ "endHour": offsetend.hour,
+ "endMinute": offsetend.minute}
+
+ self.module.debug("Making RPC call to 'setHostDataSourceSDT'")
+ resp = json.loads(self.rpc("setHostDataSourceSDT", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=resp["errmsg"])
+
+
+class Hostgroup(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor host object"""
+ self.change = False
+ self.params = params
+
+ LogicMonitor.__init__(self, module, **self.params)
+ self.module.debug("Instantiating Hostgroup object")
+
+ self.fullpath = self.params["fullpath"]
+ self.info = self.get_group(self.fullpath)
+ self.properties = self.params["properties"]
+ self.description = self.params["description"]
+ self.starttime = self.params["starttime"]
+ self.duration = self.params["duration"]
+ self.alertenable = self.params["alertenable"]
+
+ def create(self):
+ """Wrapper for self.update()"""
+ self.module.debug("Running Hostgroup.create...")
+ self.update()
+
+ def get_properties(self, final=False):
+ """Returns a hash of the properties
+ associated with this LogicMonitor host"""
+ self.module.debug("Running Hostgroup.get_properties...")
+
+ if self.info:
+ self.module.debug("Group found")
+
+ self.module.debug("Making RPC call to 'getHostGroupProperties'")
+ properties_json = json.loads(self.rpc(
+ "getHostGroupProperties",
+ {'hostGroupId': self.info["id"],
+ "finalResult": final}))
+
+ if properties_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return properties_json["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=properties_json["status"])
+ else:
+ self.module.debug("Group not found")
+ return None
+
+ def set_properties(self, propertyhash):
+ """Update the host to have the properties
+ contained in the property hash"""
+ self.module.debug("Running Hostgroup.set_properties")
+
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Assigning property has to host object")
+ self.properties = propertyhash
+
+ def add(self):
+ """Idempotent function to ensure that the host
+ group exists in your LogicMonitor account"""
+ self.module.debug("Running Hostgroup.add")
+
+ if self.info is None:
+ self.module.debug("Group doesn't exist. Creating.")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.create_group(self.fullpath)
+ self.info = self.get_group(self.fullpath)
+
+ self.module.debug("Group created")
+ return self.info
+ else:
+ self.module.debug("Group already exists")
+
+ def update(self):
+ """Idempotent function to ensure the host group settings
+ (alertenable, properties, etc) in the
+ LogicMonitor account match the current object."""
+ self.module.debug("Running Hostgroup.update")
+
+ if self.info:
+ if self.is_changed():
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ h = self._build_host_group_hash(
+ self.fullpath,
+ self.description,
+ self.properties,
+ self.alertenable)
+ h["opType"] = "replace"
+
+ if self.fullpath != "/":
+ h["id"] = self.info["id"]
+
+ self.module.debug("Making RPC call to 'updateHostGroup'")
+ resp = json.loads(self.rpc("updateHostGroup", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg="Error: Unable to update the " +
+ "host.\n" + resp["errmsg"])
+ else:
+ self.module.debug(
+ "Group properties match supplied properties. " +
+ "No changes to make"
+ )
+ return self.info
+ else:
+ self.module.debug("Group doesn't exist. Creating.")
+
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ return self.add()
+
+ def remove(self):
+ """Idempotent function to ensure the host group
+ does not exist in your LogicMonitor account"""
+ self.module.debug("Running Hostgroup.remove...")
+
+ if self.info:
+ self.module.debug("Group exists")
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ self.module.debug("Making RPC call to 'deleteHostGroup'")
+ resp = json.loads(self.rpc("deleteHostGroup",
+ {"hgId": self.info["id"]}))
+
+ if resp["status"] == 200:
+ self.module.debug(resp)
+ self.module.debug("RPC call succeeded")
+ return resp
+ elif resp["errmsg"] == "No such group":
+ self.module.debug("Group doesn't exist")
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(resp)
+ self.fail(msg=resp["errmsg"])
+ else:
+ self.module.debug("Group doesn't exist")
+
+ def is_changed(self):
+ """Return true if the host doesn't match
+ the LogicMonitor account"""
+ self.module.debug("Running Hostgroup.is_changed...")
+
+ ignore = []
+ group = self.get_group(self.fullpath)
+ properties = self.get_properties()
+
+ if properties is not None and group is not None:
+ self.module.debug("Comparing simple group properties")
+ if (group["alertEnable"] != self.alertenable or
+ group["description"] != self.description):
+
+ return True
+
+ p = {}
+
+ self.module.debug("Creating list of properties")
+ for prop in properties:
+ if prop["name"] not in ignore:
+ if ("*******" in prop["value"] and
+ self._verify_property(prop["name"])):
+
+ p[prop["name"]] = (
+ self.properties[prop["name"]])
+ else:
+ p[prop["name"]] = prop["value"]
+
+ self.module.debug("Comparing properties")
+ if set(p) != set(self.properties):
+ return True
+ else:
+ self.module.debug("No property information received")
+ return False
+
+ def sdt(self, duration=30, starttime=None):
+ """Create a scheduled down time
+ (maintenance window) for this host"""
+ self.module.debug("Running Hostgroup.sdt")
+
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ duration = self.duration
+ starttime = self.starttime
+ offsetstart = starttime
+
+ if starttime:
+ self.module.debug("Start time specified")
+ start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
+ offsetstart = start
+ else:
+ self.module.debug("No start time specified. Using default.")
+ start = datetime.datetime.utcnow()
+
+ # Use user UTC offset
+ self.module.debug("Making RPC call to 'getTimeZoneSetting'")
+ accountresp = json.loads(self.rpc("getTimeZoneSetting", {}))
+
+ if accountresp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ offset = accountresp["data"]["offset"]
+ offsetstart = start + datetime.timedelta(0, offset)
+ else:
+ self.fail(
+ msg="Error: Unable to retrieve timezone offset")
+
+ offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+
+ h = {"hostGroupId": self.info["id"],
+ "type": 1,
+ "year": offsetstart.year,
+ "month": offsetstart.month-1,
+ "day": offsetstart.day,
+ "hour": offsetstart.hour,
+ "minute": offsetstart.minute,
+ "endYear": offsetend.year,
+ "endMonth": offsetend.month-1,
+ "endDay": offsetend.day,
+ "endHour": offsetend.hour,
+ "endMinute": offsetend.minute}
+
+ self.module.debug("Making RPC call to setHostGroupSDT")
+ resp = json.loads(self.rpc("setHostGroupSDT", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=resp["errmsg"])
+
+ def site_facts(self):
+ """Output current properties information for the Hostgroup"""
+ self.module.debug("Running Hostgroup.site_facts...")
+
+ if self.info:
+ self.module.debug("Group exists")
+ props = self.get_properties(True)
+
+ self.output_info(props)
+ else:
+ self.fail(msg="Error: Group doesn't exit.")
+
+ def _build_host_group_hash(self,
+ fullpath,
+ description,
+ properties,
+ alertenable):
+ """Return a property formated hash for the
+ creation of a hostgroup using the rpc function"""
+ self.module.debug("Running Hostgroup._build_host_hash")
+
+ h = {}
+ h["alertEnable"] = alertenable
+
+ if fullpath == "/":
+ self.module.debug("Group is root")
+ h["id"] = 1
+ else:
+ self.module.debug("Determining group path")
+ parentpath, name = fullpath.rsplit('/', 1)
+ parent = self.get_group(parentpath)
+
+ h["name"] = name
+
+ if parent:
+ self.module.debug("Parent group " +
+ str(parent["id"]) + " found.")
+ h["parentID"] = parent["id"]
+ else:
+ self.module.debug("No parent group found. Using root.")
+ h["parentID"] = 1
+
+ if description:
+ self.module.debug("Description property exists")
+ h["description"] = description
+
+ if properties != {}:
+ self.module.debug("Properties hash exists")
+ propnum = 0
+ for key, value in properties.iteritems():
+ h["propName" + str(propnum)] = key
+ h["propValue" + str(propnum)] = value
+ propnum = propnum + 1
+
+ return h
+
+ def _verify_property(self, propname):
+ """Check with LogicMonitor server
+ to verify property is unchanged"""
+ self.module.debug("Running Hostgroup._verify_property")
+
+ if self.info:
+ self.module.debug("Group exists")
+ if propname not in self.properties:
+ self.module.debug("Property " + propname + " does not exist")
+ return False
+ else:
+ self.module.debug("Property " + propname + " exists")
+ h = {"hostGroupId": self.info["id"],
+ "propName0": propname,
+ "propValue0": self.properties[propname]}
+
+ self.module.debug("Making RCP call to 'verifyProperties'")
+ resp = json.loads(self.rpc('verifyProperties', h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]["match"]
+ else:
+ self.fail(
+ msg="Error: unable to get verification " +
+ "from server.\n%s" % resp["errmsg"])
+ else:
+ self.fail(
+ msg="Error: Group doesn't exist. Unable to verify properties")
+
+
+def selector(module):
+ """Figure out which object and which actions
+ to take given the right parameters"""
+
+ if module.params["target"] == "collector":
+ target = Collector(module.params, module)
+ elif module.params["target"] == "host":
+ # Make sure required parameter collector is specified
+ if ((module.params["action"] == "add" or
+ module.params["displayname"] is None) and
+ module.params["collector"] is None):
+ module.fail_json(
+ msg="Parameter 'collector' required.")
+
+ target = Host(module.params, module)
+ elif module.params["target"] == "datasource":
+ # Validate target specific required parameters
+ if module.params["id"] is not None:
+ # make sure a supported action was specified
+ if module.params["action"] == "sdt":
+ target = Datasource(module.params, module)
+ else:
+ errmsg = ("Error: Unexpected action \"" +
+ module.params["action"] + "\" was specified.")
+ module.fail_json(msg=errmsg)
+
+ elif module.params["target"] == "hostgroup":
+ # Validate target specific required parameters
+ if module.params["fullpath"] is not None:
+ target = Hostgroup(module.params, module)
+ else:
+ module.fail_json(
+ msg="Parameter 'fullpath' required for target 'hostgroup'")
+ else:
+ module.fail_json(
+ msg="Error: Unexpected target \"" + module.params["target"] +
+ "\" was specified.")
+
+ if module.params["action"].lower() == "add":
+ action = target.create
+ elif module.params["action"].lower() == "remove":
+ action = target.remove
+ elif module.params["action"].lower() == "sdt":
+ action = target.sdt
+ elif module.params["action"].lower() == "update":
+ action = target.update
+ else:
+ errmsg = ("Error: Unexpected action \"" + module.params["action"] +
+ "\" was specified.")
+ module.fail_json(msg=errmsg)
+
+ action()
+ module.exit_json(changed=target.change)
+
+
+def main():
+ TARGETS = [
+ "collector",
+ "host",
+ "datasource",
+ "hostgroup"]
+
+ ACTIONS = [
+ "add",
+ "remove",
+ "sdt",
+ "update"]
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ target=dict(required=True, default=None, choices=TARGETS),
+ action=dict(required=True, default=None, choices=ACTIONS),
+ company=dict(required=True, default=None),
+ user=dict(required=True, default=None),
+ password=dict(required=True, default=None, no_log=True),
+
+ collector=dict(required=False, default=None),
+ hostname=dict(required=False, default=None),
+ displayname=dict(required=False, default=None),
+ id=dict(required=False, default=None),
+ description=dict(required=False, default=""),
+ fullpath=dict(required=False, default=None),
+ starttime=dict(required=False, default=None),
+ duration=dict(required=False, default=30),
+ properties=dict(required=False, default={}, type="dict"),
+ groups=dict(required=False, default=[], type="list"),
+ alertenable=dict(required=False, default="true", choices=BOOLEANS)
+ ),
+ supports_check_mode=True
+ )
+
+ if HAS_LIB_JSON is not True:
+ module.fail_json(msg="Unable to load JSON library")
+
+ selector(module)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.urls import open_url
+
+
+if __name__ == "__main__":
+ main()
diff --git a/monitoring/logicmonitor_facts.py b/monitoring/logicmonitor_facts.py
new file mode 100644
index 00000000000..5ade901a76a
--- /dev/null
+++ b/monitoring/logicmonitor_facts.py
@@ -0,0 +1,638 @@
+#!/usr/bin/python
+
+"""LogicMonitor Ansible module for managing Collectors, Hosts and Hostgroups
+ Copyright (C) 2015 LogicMonitor
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA"""
+
+
+import socket
+import types
+import urllib
+
+HAS_LIB_JSON = True
+try:
+ import json
+ # Detect the python-json library which is incompatible
+ # Look for simplejson if that's the case
+ try:
+ if (
+ not isinstance(json.loads, types.FunctionType) or
+ not isinstance(json.dumps, types.FunctionType)
+ ):
+ raise ImportError
+ except AttributeError:
+ raise ImportError
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ print(
+ '\n{"msg": "Error: ansible requires the stdlib json or ' +
+ 'simplejson module, neither was found!", "failed": true}'
+ )
+ HAS_LIB_JSON = False
+ except SyntaxError:
+ print(
+ '\n{"msg": "SyntaxError: probably due to installed simplejson ' +
+ 'being for a different python version", "failed": true}'
+ )
+ HAS_LIB_JSON = False
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: logicmonitor_facts
+short_description: Collect facts about LogicMonitor objects
+description:
+ - LogicMonitor is a hosted, full-stack, infrastructure monitoring platform.
+ - This module collects facts about hosts and host groups within your LogicMonitor account.
+version_added: "2.2"
+author: [Ethan Culler-Mayeno (@ethanculler), Jeff Wozniak (@woz5999)]
+notes:
+ - You must have an existing LogicMonitor account for this module to function.
+requirements: ["An existing LogicMonitor account", "Linux"]
+options:
+ target:
+ description:
+ - The LogicMonitor object you wish to manage.
+ required: true
+ default: null
+ choices: ['host', 'hostgroup']
+ company:
+ description:
+ - The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes".
+ required: true
+ default: null
+ user:
+ description:
+ - A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user.
+ required: true
+ default: null
+ password:
+ description:
+ - The password for the chosen LogicMonitor User.
+ - If an md5 hash is used, the digest flag must be set to true.
+ required: true
+ default: null
+ collector:
+ description:
+ - The fully qualified domain name of a collector in your LogicMonitor account.
+ - This is optional for querying a LogicMonitor host when a displayname is specified.
+ - This is required for querying a LogicMonitor host when a displayname is not specified.
+ required: false
+ default: null
+ hostname:
+ description:
+ - The hostname of a host in your LogicMonitor account, or the desired hostname of a device to add into monitoring.
+ - Required for managing hosts (target=host).
+ required: false
+ default: 'hostname -f'
+ displayname:
+ description:
+ - The display name of a host in your LogicMonitor account or the desired display name of a device to add into monitoring.
+ required: false
+ default: 'hostname -f'
+ fullpath:
+ description:
+ - The fullpath of the hostgroup object you would like to manage.
+ - Recommended to run on a single Ansible host.
+ - Required for management of LogicMonitor host groups (target=hostgroup).
+ required: false
+ default: null
+...
+'''
+
+EXAMPLES = '''
+# example of querying a list of hosts
+```
+---
+- hosts: hosts
+ user: root
+ vars:
+ company: 'yourcompany'
+ user: 'Luigi'
+ password: 'ImaLuigi,number1!'
+ tasks:
+ - name: query a list of hosts
+ # All tasks should use local_action
+ local_action:
+ logicmonitor_facts:
+ target: host
+ company: '{{ company }}'
+ user: '{{ user }}'
+ password: '{{ password }}'
+```
+
+# example of querying a hostgroup
+```
+---
+- hosts: somemachine.superheroes.com
+ user: root
+ vars:
+ company: 'yourcompany'
+ user: 'mario'
+ password: 'itsame.Mario!'
+ tasks:
+ - name: query a host group
+ # All tasks should use local_action
+ local_action:
+ logicmonitor_facts:
+ target: hostgroup
+ fullpath: '/servers/production'
+ company: '{{ company }}'
+ user: '{{ user }}'
+ password: '{{ password }}'
+```
+'''
+
+
+RETURN = '''
+---
+ ansible_facts:
+ description: LogicMonitor properties set for the specified object
+ returned: success
+ type: list of dicts containing name/value pairs
+ example: >
+ {
+ "name": "dc",
+ "value": "1"
+ },
+ {
+ "name": "type",
+ "value": "prod"
+ },
+ {
+ "name": "system.categories",
+ "value": ""
+ },
+ {
+ "name": "snmp.community",
+ "value": "********"
+ }
+...
+'''
+
+
+class LogicMonitor(object):
+
+ def __init__(self, module, **params):
+ self.__version__ = "1.0-python"
+ self.module = module
+ self.module.debug("Instantiating LogicMonitor object")
+
+ self.check_mode = False
+ self.company = params["company"]
+ self.user = params["user"]
+ self.password = params["password"]
+ self.fqdn = socket.getfqdn()
+ self.lm_url = "logicmonitor.com/santaba"
+ self.__version__ = self.__version__ + "-ansible-module"
+
+ def rpc(self, action, params):
+ """Make a call to the LogicMonitor RPC library
+ and return the response"""
+ self.module.debug("Running LogicMonitor.rpc")
+
+ param_str = urllib.urlencode(params)
+ creds = urllib.urlencode(
+ {"c": self.company,
+ "u": self.user,
+ "p": self.password})
+
+ if param_str:
+ param_str = param_str + "&"
+
+ param_str = param_str + creds
+
+ try:
+ url = ("https://" + self.company + "." + self.lm_url +
+ "/rpc/" + action + "?" + param_str)
+
+ # Set custom LogicMonitor header with version
+ headers = {"X-LM-User-Agent": self.__version__}
+
+ # Set headers
+ f = open_url(url, headers=headers)
+
+ raw = f.read()
+ resp = json.loads(raw)
+ if resp["status"] == 403:
+ self.module.debug("Authentication failed.")
+ self.fail(msg="Error: " + resp["errmsg"])
+ else:
+ return raw
+ except IOError:
+ ioe = get_exception()
+ self.fail(msg="Error: Exception making RPC call to " +
+ "https://" + self.company + "." + self.lm_url +
+ "/rpc/" + action + "\nException" + str(ioe))
+
+ def get_collectors(self):
+ """Returns a JSON object containing a list of
+ LogicMonitor collectors"""
+ self.module.debug("Running LogicMonitor.get_collectors...")
+
+ self.module.debug("Making RPC call to 'getAgents'")
+ resp = self.rpc("getAgents", {})
+ resp_json = json.loads(resp)
+
+ if resp_json["status"] is 200:
+ self.module.debug("RPC call succeeded")
+ return resp_json["data"]
+ else:
+ self.fail(msg=resp)
+
+ def get_host_by_hostname(self, hostname, collector):
+ """Returns a host object for the host matching the
+ specified hostname"""
+ self.module.debug("Running LogicMonitor.get_host_by_hostname...")
+
+ self.module.debug("Looking for hostname " + hostname)
+ self.module.debug("Making RPC call to 'getHosts'")
+ hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1}))
+
+ if collector:
+ if hostlist_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+
+ hosts = hostlist_json["data"]["hosts"]
+
+ self.module.debug(
+ "Looking for host matching: hostname " + hostname +
+ " and collector " + str(collector["id"]))
+
+ for host in hosts:
+ if (host["hostName"] == hostname and
+ host["agentId"] == collector["id"]):
+
+ self.module.debug("Host match found")
+ return host
+ self.module.debug("No host match found")
+ return None
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(hostlist_json)
+ else:
+ self.module.debug("No collector specified")
+ return None
+
+ def get_host_by_displayname(self, displayname):
+ """Returns a host object for the host matching the
+ specified display name"""
+ self.module.debug("Running LogicMonitor.get_host_by_displayname...")
+
+ self.module.debug("Looking for displayname " + displayname)
+ self.module.debug("Making RPC call to 'getHost'")
+ host_json = (json.loads(self.rpc("getHost",
+ {"displayName": displayname})))
+
+ if host_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return host_json["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(host_json)
+ return None
+
+ def get_collector_by_description(self, description):
+ """Returns a JSON collector object for the collector
+ matching the specified FQDN (description)"""
+ self.module.debug(
+ "Running LogicMonitor.get_collector_by_description..."
+ )
+
+ collector_list = self.get_collectors()
+ if collector_list is not None:
+ self.module.debug("Looking for collector with description " +
+ description)
+ for collector in collector_list:
+ if collector["description"] == description:
+ self.module.debug("Collector match found")
+ return collector
+ self.module.debug("No collector match found")
+ return None
+
+ def get_group(self, fullpath):
+ """Returns a JSON group object for the group matching the
+ specified path"""
+ self.module.debug("Running LogicMonitor.get_group...")
+
+ self.module.debug("Making RPC call to getHostGroups")
+ resp = json.loads(self.rpc("getHostGroups", {}))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC called succeeded")
+ groups = resp["data"]
+
+ self.module.debug("Looking for group matching " + fullpath)
+ for group in groups:
+ if group["fullPath"] == fullpath.lstrip('/'):
+ self.module.debug("Group match found")
+ return group
+
+ self.module.debug("No group match found")
+ return None
+ else:
+ self.module.debug("RPC call failed")
+ self.module.debug(resp)
+
+ return None
+
+ def create_group(self, fullpath):
+ """Recursively create a path of host groups.
+ Returns the id of the newly created hostgroup"""
+ self.module.debug("Running LogicMonitor.create_group...")
+
+ res = self.get_group(fullpath)
+ if res:
+ self.module.debug("Group " + fullpath + " exists.")
+ return res["id"]
+
+ if fullpath == "/":
+ self.module.debug("Specified group is root. Doing nothing.")
+ return 1
+ else:
+ self.module.debug("Creating group named " + fullpath)
+ self.module.debug("System changed")
+ self.change = True
+
+ if self.check_mode:
+ self.exit(changed=True)
+
+ parentpath, name = fullpath.rsplit('/', 1)
+ parentgroup = self.get_group(parentpath)
+
+ parentid = 1
+
+ if parentpath == "":
+ parentid = 1
+ elif parentgroup:
+ parentid = parentgroup["id"]
+ else:
+ parentid = self.create_group(parentpath)
+
+ h = None
+
+ # Determine if we're creating a group from host or hostgroup class
+ if hasattr(self, '_build_host_group_hash'):
+ h = self._build_host_group_hash(
+ fullpath,
+ self.description,
+ self.properties,
+ self.alertenable)
+ h["name"] = name
+ h["parentId"] = parentid
+ else:
+ h = {"name": name,
+ "parentId": parentid,
+ "alertEnable": True,
+ "description": ""}
+
+ self.module.debug("Making RPC call to 'addHostGroup'")
+ resp = json.loads(
+ self.rpc("addHostGroup", h))
+
+ if resp["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return resp["data"]["id"]
+ elif resp["errmsg"] == "The record already exists":
+ self.module.debug("The hostgroup already exists")
+ group = self.get_group(fullpath)
+ return group["id"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(
+ msg="Error: unable to create new hostgroup \"" + name +
+ "\".\n" + resp["errmsg"])
+
+ def fail(self, msg):
+ self.module.fail_json(msg=msg, changed=self.change)
+
+ def exit(self, changed):
+ self.module.debug("Changed: " + changed)
+ self.module.exit_json(changed=changed)
+
+ def output_info(self, info):
+ self.module.debug("Registering properties as Ansible facts")
+ self.module.exit_json(changed=False, ansible_facts=info)
+
+
+class Host(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor host object"""
+ self.change = False
+ self.params = params
+ self.collector = None
+
+ LogicMonitor.__init__(self, module, **self.params)
+ self.module.debug("Instantiating Host object")
+
+ if self.params["hostname"]:
+ self.module.debug("Hostname is " + self.params["hostname"])
+ self.hostname = self.params['hostname']
+ else:
+ self.module.debug("No hostname specified. Using " + self.fqdn)
+ self.hostname = self.fqdn
+
+ if self.params["displayname"]:
+ self.module.debug("Display name is " + self.params["displayname"])
+ self.displayname = self.params['displayname']
+ else:
+ self.module.debug("No display name specified. Using " + self.fqdn)
+ self.displayname = self.fqdn
+
+ # Attempt to find host information via display name or hostname
+ self.module.debug("Attempting to find host by displayname " +
+ self.displayname)
+ info = self.get_host_by_displayname(self.displayname)
+
+ if info is not None:
+ self.module.debug("Host found by displayname")
+ # Use the host information to grab the collector description
+ # if not provided
+ if (self.params["collector"] is None and
+ "agentDescription" in info):
+ self.module.debug("Setting collector from host response. " +
+ "Collector " + info["agentDescription"])
+ self.params["collector"] = info["agentDescription"]
+ else:
+ self.module.debug("Host not found by displayname")
+
+ # At this point, a valid collector description is required for success
+ # Check that the description exists or fail
+ if self.params["collector"]:
+ self.module.debug("Collector specified is " +
+ self.params["collector"])
+ self.collector = (self.get_collector_by_description(
+ self.params["collector"]))
+ else:
+ self.fail(msg="No collector specified.")
+
+ # If the host wasn't found via displayname, attempt by hostname
+ if info is None:
+ self.module.debug("Attempting to find host by hostname " +
+ self.hostname)
+ info = self.get_host_by_hostname(self.hostname, self.collector)
+
+ self.info = info
+
+ def get_properties(self):
+ """Returns a hash of the properties
+ associated with this LogicMonitor host"""
+ self.module.debug("Running Host.get_properties...")
+
+ if self.info:
+ self.module.debug("Making RPC call to 'getHostProperties'")
+ properties_json = (json.loads(self.rpc("getHostProperties",
+ {'hostId': self.info["id"],
+ "filterSystemProperties": True})))
+
+ if properties_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return properties_json["data"]
+ else:
+ self.module.debug("Error: there was an issue retrieving the " +
+ "host properties")
+ self.module.debug(properties_json["errmsg"])
+
+ self.fail(msg=properties_json["status"])
+ else:
+ self.module.debug(
+ "Unable to find LogicMonitor host which matches " +
+ self.displayname + " (" + self.hostname + ")"
+ )
+ return None
+
+ def site_facts(self):
+ """Output current properties information for the Host"""
+ self.module.debug("Running Host.site_facts...")
+
+ if self.info:
+ self.module.debug("Host exists")
+ props = self.get_properties()
+
+ self.output_info(props)
+ else:
+ self.fail(msg="Error: Host doesn't exit.")
+
+
+class Hostgroup(LogicMonitor):
+
+ def __init__(self, params, module=None):
+ """Initializor for the LogicMonitor host object"""
+ self.change = False
+ self.params = params
+
+ LogicMonitor.__init__(self, module, **self.params)
+ self.module.debug("Instantiating Hostgroup object")
+
+ self.fullpath = self.params["fullpath"]
+ self.info = self.get_group(self.fullpath)
+
+ def get_properties(self, final=False):
+ """Returns a hash of the properties
+ associated with this LogicMonitor host"""
+ self.module.debug("Running Hostgroup.get_properties...")
+
+ if self.info:
+ self.module.debug("Group found")
+
+ self.module.debug("Making RPC call to 'getHostGroupProperties'")
+ properties_json = json.loads(self.rpc(
+ "getHostGroupProperties",
+ {'hostGroupId': self.info["id"],
+ "finalResult": final}))
+
+ if properties_json["status"] == 200:
+ self.module.debug("RPC call succeeded")
+ return properties_json["data"]
+ else:
+ self.module.debug("RPC call failed")
+ self.fail(msg=properties_json["status"])
+ else:
+ self.module.debug("Group not found")
+ return None
+
+ def site_facts(self):
+ """Output current properties information for the Hostgroup"""
+ self.module.debug("Running Hostgroup.site_facts...")
+
+ if self.info:
+ self.module.debug("Group exists")
+ props = self.get_properties(True)
+
+ self.output_info(props)
+ else:
+ self.fail(msg="Error: Group doesn't exit.")
+
+
+def selector(module):
+ """Figure out which object and which actions
+ to take given the right parameters"""
+
+ if module.params["target"] == "host":
+ target = Host(module.params, module)
+ target.site_facts()
+ elif module.params["target"] == "hostgroup":
+ # Validate target specific required parameters
+ if module.params["fullpath"] is not None:
+ target = Hostgroup(module.params, module)
+ target.site_facts()
+ else:
+ module.fail_json(
+ msg="Parameter 'fullpath' required for target 'hostgroup'")
+ else:
+ module.fail_json(
+ msg="Error: Unexpected target \"" + module.params["target"] +
+ "\" was specified.")
+
+
+def main():
+ TARGETS = [
+ "host",
+ "hostgroup"]
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ target=dict(required=True, default=None, choices=TARGETS),
+ company=dict(required=True, default=None),
+ user=dict(required=True, default=None),
+ password=dict(required=True, default=None, no_log=True),
+
+ collector=dict(required=False, default=None),
+ hostname=dict(required=False, default=None),
+ displayname=dict(required=False, default=None),
+ fullpath=dict(required=False, default=None)
+ ),
+ supports_check_mode=True
+ )
+
+ if HAS_LIB_JSON is not True:
+ module.fail_json(msg="Unable to load JSON library")
+
+ selector(module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.urls import open_url
+
+if __name__ == "__main__":
+ main()
diff --git a/monitoring/monit.py b/monitoring/monit.py
index 3d3c7c8c3ca..5e88c7b54d8 100644
--- a/monitoring/monit.py
+++ b/monitoring/monit.py
@@ -18,6 +18,11 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
+import time
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -38,18 +43,29 @@
required: true
default: null
choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
+ timeout:
+ description:
+ - If there are pending actions for the service monitored by monit, then Ansible will check
+ for up to this many seconds to verify that the requested action has been performed.
+ Ansible will sleep for five seconds between each check.
+ required: false
+ default: 300
+ version_added: "2.1"
requirements: [ ]
author: "Darryl Stoflet (@dstoflet)"
'''
EXAMPLES = '''
# Manage the state of program "httpd" to be in "started" state.
-- monit: name=httpd state=started
+- monit:
+ name: httpd
+ state: started
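+
+# A sketch of the new 'timeout' option added by this patch: wait up to
+# 120 seconds for monit to leave a pending/initializing state.
+- monit:
+ name: httpd
+ state: started
+ timeout: 120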
'''
def main():
arg_spec = dict(
name=dict(required=True),
+ timeout=dict(default=300, type='int'),
state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
)
@@ -57,17 +73,10 @@ def main():
name = module.params['name']
state = module.params['state']
+ timeout = module.params['timeout']
MONIT = module.get_bin_path('monit', True)
- if state == 'reloaded':
- if module.check_mode:
- module.exit_json(changed=True)
- rc, out, err = module.run_command('%s reload' % MONIT)
- if rc != 0:
- module.fail_json(msg='monit reload failed', stdout=out, stderr=err)
- module.exit_json(changed=True, name=name, state=state)
-
def status():
"""Return the status of the process in monit, or the empty string if not present."""
rc, out, err = module.run_command('%s summary' % MONIT, check_rc=True)
@@ -86,8 +95,34 @@ def run_command(command):
module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True)
return status()
- process_status = status()
- present = process_status != ''
+ def wait_for_monit_to_stop_pending():
+ """Fails this run if there is no status or it's pending/initalizing for timeout"""
+ timeout_time = time.time() + timeout
+ sleep_time = 5
+
+ running_status = status()
+ while running_status == '' or 'pending' in running_status or 'initializing' in running_status:
+ if time.time() >= timeout_time:
+ module.fail_json(
+ msg='waited too long for "pending" or "initializing" status to go away ({0})'.format(
+ running_status
+ ),
+ state=state
+ )
+
+ time.sleep(sleep_time)
+ running_status = status()
+
+ if state == 'reloaded':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = module.run_command('%s reload' % MONIT)
+ if rc != 0:
+ module.fail_json(msg='monit reload failed', stdout=out, stderr=err)
+ wait_for_monit_to_stop_pending()
+ module.exit_json(changed=True, name=name, state=state)
+
+ present = status() != ''
if not present and not state == 'present':
module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state)
@@ -98,12 +133,12 @@ def run_command(command):
module.exit_json(changed=True)
status = run_command('reload')
if status == '':
- module.fail_json(msg='%s process not configured with monit' % name, name=name, state=state)
- else:
- module.exit_json(changed=True, name=name, state=state)
+ wait_for_monit_to_stop_pending()
+ module.exit_json(changed=True, name=name, state=state)
module.exit_json(changed=False, name=name, state=state)
- running = 'running' in process_status
+ wait_for_monit_to_stop_pending()
+ running = 'running' in status()
if running and state in ['started', 'monitored']:
module.exit_json(changed=False, name=name, state=state)
@@ -153,4 +188,5 @@ def run_command(command):
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/monitoring/nagios.py b/monitoring/nagios.py
index ee67a3ae20b..78bd897ed1d 100644
--- a/monitoring/nagios.py
+++ b/monitoring/nagios.py
@@ -15,6 +15,10 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: nagios
@@ -31,8 +35,9 @@
description:
- Action to take.
- servicegroup options were added in 2.0.
+ - delete_downtime options were added in 2.2.
required: true
- choices: [ "downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
+ choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
"silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
"servicegroup_host_downtime" ]
host:
@@ -88,50 +93,107 @@
EXAMPLES = '''
# set 30 minutes of apache downtime
-- nagios: action=downtime minutes=30 service=httpd host={{ inventory_hostname }}
+- nagios:
+ action: downtime
+ minutes: 30
+ service: httpd
+ host: '{{ inventory_hostname }}'
# schedule an hour of HOST downtime
-- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }}
+- nagios:
+ action: downtime
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
# schedule an hour of HOST downtime, with a comment describing the reason
-- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }}
- comment='This host needs disciplined'
+- nagios:
+ action: downtime
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+ comment: Rebuilding machine
# schedule downtime for ALL services on HOST
-- nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }}
+- nagios:
+ action: downtime
+ minutes: 45
+ service: all
+ host: '{{ inventory_hostname }}'
# schedule downtime for a few services
-- nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }}
+- nagios:
+ action: downtime
+ services: frob,foobar,qeuz
+ host: '{{ inventory_hostname }}'
# set 30 minutes downtime for all services in servicegroup foo
-- nagios: action=servicegroup_service_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }}
+- nagios:
+ action: servicegroup_service_downtime
+ minutes: 30
+ servicegroup: foo
+ host: '{{ inventory_hostname }}'
# set 30 minutes downtime for all host in servicegroup foo
-- nagios: action=servicegroup_host_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }}
+- nagios:
+ action: servicegroup_host_downtime
+ minutes: 30
+ servicegroup: foo
+ host: '{{ inventory_hostname }}'
+
+# delete all downtime for a given host
+- nagios:
+ action: delete_downtime
+ host: '{{ inventory_hostname }}'
+ service: all
+
+# delete all downtime for HOST with a particular comment
+- nagios:
+ action: delete_downtime
+ host: '{{ inventory_hostname }}'
+ service: host
+ comment: Planned maintenance
# enable SMART disk alerts
-- nagios: action=enable_alerts service=smart host={{ inventory_hostname }}
+- nagios:
+ action: enable_alerts
+ service: smart
+ host: '{{ inventory_hostname }}'
# "two services at once: disable httpd and nfs alerts"
-- nagios: action=disable_alerts service=httpd,nfs host={{ inventory_hostname }}
+- nagios:
+ action: disable_alerts
+ service: httpd,nfs
+ host: '{{ inventory_hostname }}'
# disable HOST alerts
-- nagios: action=disable_alerts service=host host={{ inventory_hostname }}
+- nagios:
+ action: disable_alerts
+ service: host
+ host: '{{ inventory_hostname }}'
# silence ALL alerts
-- nagios: action=silence host={{ inventory_hostname }}
+- nagios:
+ action: silence
+ host: '{{ inventory_hostname }}'
# unsilence all alerts
-- nagios: action=unsilence host={{ inventory_hostname }}
+- nagios:
+ action: unsilence
+ host: '{{ inventory_hostname }}'
# SHUT UP NAGIOS
-- nagios: action=silence_nagios
+- nagios:
+ action: silence_nagios
# ANNOY ME NAGIOS
-- nagios: action=unsilence_nagios
+- nagios:
+ action: unsilence_nagios
# command something
-- nagios: action=command command='DISABLE_FAILURE_PREDICTION'
+- nagios:
+ action: command
+ command: DISABLE_FAILURE_PREDICTION
'''
import ConfigParser
@@ -181,6 +243,7 @@ def which_cmdfile():
def main():
ACTION_CHOICES = [
'downtime',
+ 'delete_downtime',
'silence',
'unsilence',
'enable_alerts',
@@ -242,6 +305,12 @@ def main():
except Exception:
module.fail_json(msg='invalid entry for minutes')
+ ######################################################################
+ if action == 'delete_downtime':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to set downtime for')
+
######################################################################
if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']:
@@ -266,7 +335,7 @@ def main():
module.fail_json(msg='no command passed for command action')
##################################################################
if not cmdfile:
- module.fail_json('unable to locate nagios.cfg')
+ module.fail_json(msg='unable to locate nagios.cfg')
##################################################################
ansible_nagios = Nagios(module, **module.params)
@@ -383,6 +452,47 @@ def _fmt_dt_str(self, cmd, host, duration, author=None,
return dt_str
+ def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None):
+ """
+ Format an external-command downtime deletion string.
+
+ cmd - Nagios command ID
+ host - Host to remove scheduled downtime from
+ comment - Reason downtime was added (upgrade, reboot, etc)
+ start - Start of downtime in seconds since 12:00AM Jan 1 1970
+ svc - Service to remove downtime for, omit to remove all downtime for the host
+
+ Syntax: [submitted] COMMAND;<host_name>;
+ [<servicedesc>];[<starttime>];[<comment>]
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if comment is None:
+ comment = self.comment
+
+ dt_del_args = []
+ if svc is not None:
+ dt_del_args.append(svc)
+ else:
+ dt_del_args.append('')
+
+ if start is not None:
+ dt_del_args.append(str(start))
+ else:
+ dt_del_args.append('')
+
+ if comment is not None:
+ dt_del_args.append(comment)
+ else:
+ dt_del_args.append('')
+
+ dt_del_arg_str = ";".join(dt_del_args)
+ dt_del_str = hdr + dt_del_arg_str + "\n"
+
+ return dt_del_str
+
def _fmt_notif_str(self, cmd, host=None, svc=None):
"""
Format an external-command notification string.
@@ -462,6 +572,26 @@ def schedule_host_svc_downtime(self, host, minutes=30):
dt_cmd_str = self._fmt_dt_str(cmd, host, minutes)
self._write_command(dt_cmd_str)
+ def delete_host_downtime(self, host, services=None, comment=None):
+ """
+ This command is used to remove scheduled downtime for a particular
+ host.
+
+ Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
+ [<servicedesc>];[<starttime>];[<comment>]
+ """
+
+ cmd = "DEL_DOWNTIME_BY_HOST_NAME"
+
+ if services is None:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
+ self._write_command(dt_del_cmd_str)
+ else:
+ for service in services:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
+ self._write_command(dt_del_cmd_str)
+
+
def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30):
"""
This command is used to schedule downtime for all hosts in a
@@ -873,7 +1003,7 @@ def nagios_cmd(self, cmd):
pre = '[%s]' % int(time.time())
post = '\n'
- cmdstr = '%s %s %s' % (pre, cmd, post)
+ cmdstr = '%s %s%s' % (pre, cmd, post)
self._write_command(cmdstr)
def act(self):
@@ -891,6 +1021,15 @@ def act(self):
self.schedule_svc_downtime(self.host,
services=self.services,
minutes=self.minutes)
+
+ elif self.action == 'delete_downtime':
+ if self.services == 'host':
+ self.delete_host_downtime(self.host)
+ elif self.services == 'all':
+ self.delete_host_downtime(self.host, comment='')
+ else:
+ self.delete_host_downtime(self.host, services=self.services)
+
elif self.action == "servicegroup_host_downtime":
if self.servicegroup:
self.schedule_servicegroup_host_downtime(servicegroup = self.servicegroup, minutes = self.minutes)
@@ -943,4 +1082,6 @@ def act(self):
######################################################################
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/monitoring/newrelic_deployment.py b/monitoring/newrelic_deployment.py
index 3d9bc6c0ec3..c8f8703230d 100644
--- a/monitoring/newrelic_deployment.py
+++ b/monitoring/newrelic_deployment.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: newrelic_deployment
@@ -76,10 +80,11 @@
'''
EXAMPLES = '''
-- newrelic_deployment: token=AAAAAA
- app_name=myapp
- user='ansible deployment'
- revision=1.0
+- newrelic_deployment:
+ token: AAAAAA
+ app_name: myapp
+ user: ansible deployment
+ revision: '1.0'
'''
import urllib
@@ -92,7 +97,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- token=dict(required=True),
+ token=dict(required=True, no_log=True),
app_name=dict(required=False),
application_id=dict(required=False),
changelog=dict(required=False),
@@ -143,5 +148,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
-main()
-
+if __name__ == '__main__':
+ main()
diff --git a/monitoring/pagerduty.py b/monitoring/pagerduty.py
index 99a9be8a044..43d93501c16 100644
--- a/monitoring/pagerduty.py
+++ b/monitoring/pagerduty.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
module: pagerduty
@@ -117,43 +121,54 @@
EXAMPLES='''
# List ongoing maintenance windows using a user/passwd
-- pagerduty: name=companyabc user=example@example.com passwd=password123 state=ongoing
+- pagerduty:
+ name: companyabc
+ user: example@example.com
+ passwd: password123
+ state: ongoing
# List ongoing maintenance windows using a token
-- pagerduty: name=companyabc token=xxxxxxxxxxxxxx state=ongoing
+- pagerduty:
+ name: companyabc
+ token: xxxxxxxxxxxxxx
+ state: ongoing
# Create a 1 hour maintenance window for service FOO123, using a user/passwd
-- pagerduty: name=companyabc
- user=example@example.com
- passwd=password123
- state=running
- service=FOO123
+- pagerduty:
+ name: companyabc
+ user: example@example.com
+ passwd: password123
+ state: running
+ service: FOO123
# Create a 5 minute maintenance window for service FOO123, using a token
-- pagerduty: name=companyabc
- token=xxxxxxxxxxxxxx
- hours=0
- minutes=5
- state=running
- service=FOO123
+- pagerduty:
+ name: companyabc
+ token: xxxxxxxxxxxxxx
+ hours: 0
+ minutes: 5
+ state: running
+ service: FOO123
# Create a 4 hour maintenance window for service FOO123 with the description "deployment".
-- pagerduty: name=companyabc
- user=example@example.com
- passwd=password123
- state=running
- service=FOO123
- hours=4
- desc=deployment
+- pagerduty:
+ name: companyabc
+ user: example@example.com
+ passwd: password123
+ state: running
+ service: FOO123
+ hours: 4
+ desc: deployment
register: pd_window
# Delete the previous maintenance window
-- pagerduty: name=companyabc
- user=example@example.com
- passwd=password123
- state=absent
- service={{ pd_window.result.maintenance_window.id }}
+- pagerduty:
+ name: companyabc
+ user: example@example.com
+ passwd: password123
+ state: absent
+ service: '{{ pd_window.result.maintenance_window.id }}'
'''
import datetime
@@ -203,7 +218,7 @@ def create(module, name, user, passwd, token, requester_id, service, hours, minu
data = json.dumps(request_data)
response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
- if info['status'] != 200:
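+ # The REST API signals a successful create with HTTP 201 (Created), not 200.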
+ if info['status'] != 201:
module.fail_json(msg="failed to create the window: %s" % info['msg'])
try:
@@ -229,7 +244,7 @@ def absent(module, name, user, passwd, token, requester_id, service):
data = json.dumps(request_data)
response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE')
- if info['status'] != 200:
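+ # A successful delete returns HTTP 204 (No Content), not 200.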
+ if info['status'] != 204:
module.fail_json(msg="failed to delete the window: %s" % info['msg'])
try:
@@ -296,4 +311,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/monitoring/pagerduty_alert.py b/monitoring/pagerduty_alert.py
index e2d127f0155..f011b902703 100644
--- a/monitoring/pagerduty_alert.py
+++ b/monitoring/pagerduty_alert.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
module: pagerduty_alert
diff --git a/monitoring/pingdom.py b/monitoring/pingdom.py
index 4346e8ca6fe..d37ae44ab19 100644
--- a/monitoring/pingdom.py
+++ b/monitoring/pingdom.py
@@ -15,6 +15,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
module: pingdom
@@ -69,18 +73,20 @@
EXAMPLES = '''
# Pause the check with the ID of 12345.
-- pingdom: uid=example@example.com
- passwd=password123
- key=apipassword123
- checkid=12345
- state=paused
+- pingdom:
+ uid: example@example.com
+ passwd: password123
+ key: apipassword123
+ checkid: 12345
+ state: paused
# Unpause the check with the ID of 12345.
-- pingdom: uid=example@example.com
- passwd=password123
- key=apipassword123
- checkid=12345
- state=running
+- pingdom:
+ uid: example@example.com
+ passwd: password123
+ key: apipassword123
+ checkid: 12345
+ state: running
'''
try:
@@ -149,4 +155,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/monitoring/rollbar_deployment.py b/monitoring/rollbar_deployment.py
index 060193b78a5..5ee332fcf2c 100644
--- a/monitoring/rollbar_deployment.py
+++ b/monitoring/rollbar_deployment.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rollbar_deployment
@@ -68,16 +72,22 @@
'''
EXAMPLES = '''
-- rollbar_deployment: token=AAAAAA
- environment='staging'
- user='ansible'
- revision=4.2,
- rollbar_user='admin',
- comment='Test Deploy'
+- rollbar_deployment:
+ token: AAAAAA
+ environment: staging
+ user: ansible
+ revision: '4.2'
+ rollbar_user: admin
+ comment: Test Deploy
'''
import urllib
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import fetch_url
+
+
def main():
module = AnsibleModule(
@@ -120,7 +130,8 @@ def main():
try:
data = urllib.urlencode(params)
response, info = fetch_url(module, url, data=data)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg='Unable to notify Rollbar: %s' % e)
else:
if info['status'] == 200:
@@ -128,7 +139,6 @@ def main():
else:
module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/monitoring/sensu_check.py b/monitoring/sensu_check.py
index 9a004d372e0..77a39647cf6 100644
--- a/monitoring/sensu_check.py
+++ b/monitoring/sensu_check.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: sensu_check
@@ -146,7 +150,20 @@
default: null
high_flap_threshold:
description:
- - The low threshhold for flap detection
+ - The high threshold for flap detection
+ required: false
+ default: null
+ custom:
+ version_added: "2.1"
+ description:
+ - A hash/dictionary of custom parameters for mixing into the configuration.
+ - You can't override other module parameters using this.
+ required: false
+ default: {}
+ source:
+ version_added: "2.1"
+ description:
+ - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
required: false
default: null
requirements: [ ]
@@ -157,39 +174,53 @@
# Fetch metrics about the CPU load every 60 seconds,
# the sensu server has a handler called 'relay' which forwards stats to graphite
- name: get cpu metrics
- sensu_check: name=cpu_load
- command=/etc/sensu/plugins/system/cpu-mpstat-metrics.rb
- metric=yes handlers=relay subscribers=common interval=60
+ sensu_check:
+ name: cpu_load
+ command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb
+ metric: yes
+ handlers: relay
+ subscribers: common
+ interval: 60
# Check whether nginx is running
- name: check nginx process
- sensu_check: name=nginx_running
- command='/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid'
- handlers=default subscribers=nginx interval=60
+ sensu_check:
+ name: nginx_running
+ command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid
+ handlers: default
+ subscribers: nginx
+ interval: 60
# Stop monitoring the disk capacity.
# Note that the check will still show up in the sensu dashboard,
# to remove it completely you need to issue a DELETE request to the sensu api.
- name: check disk
- sensu_check: name=check_disk_capacity state=absent
+ sensu_check:
+ name: check_disk_capacity
+ state: absent
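+
+# A sketch of the new 'custom' option from this patch; 'remediation' is a
+# hypothetical key and the plugin path is illustrative only.
+- name: check disk with a custom attribute
+ sensu_check:
+ name: check_disk_capacity
+ command: /etc/sensu/plugins/check-disk.rb
+ interval: 300
+ custom:
+ remediation: true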
'''
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
def sensu_check(module, path, name, state='present', backup=False):
changed = False
reasons = []
- try:
- import json
- except ImportError:
- import simplejson as json
-
stream = None
try:
try:
stream = open(path, 'r')
config = json.load(stream)
- except IOError, e:
+ except IOError:
+ e = get_exception()
if e.errno is 2: # File not found, non-fatal
if state == 'absent':
reasons.append('file did not exist and state is `absent\'')
@@ -240,6 +271,7 @@ def sensu_check(module, path, name, state='present', backup=False):
'aggregate',
'low_flap_threshold',
'high_flap_threshold',
+ 'source',
]
for opt in simple_opts:
if module.params[opt] is not None:
@@ -253,6 +285,31 @@ def sensu_check(module, path, name, state='present', backup=False):
changed = True
reasons.append('`{opt}\' was removed'.format(opt=opt))
+ if module.params['custom']:
+ # Reject custom params that would shadow standard module options
+ custom_params = module.params['custom']
+ overwritten_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
+ if overwritten_fields:
+ msg = 'You can\'t overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields))
+ module.fail_json(msg=msg)
+
+ for k, v in custom_params.items():
+ if k in config['checks'][name]:
+ if not config['checks'][name][k] == v:
+ changed = True
+ reasons.append('`custom param {opt}\' was changed'.format(opt=k))
+ else:
+ changed = True
+ reasons.append('`custom param {opt}\' was added'.format(opt=k))
+ check[k] = v
+ simple_opts += list(custom_params.keys())
+
+ # Remove obsolete custom params
+ for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']):
+ changed = True
+ reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
+ del check[opt]
+
if module.params['metric']:
if 'type' not in check or check['type'] != 'metric':
check['type'] = 'metric'
@@ -284,7 +341,8 @@ def sensu_check(module, path, name, state='present', backup=False):
try:
stream = open(path, 'w')
stream.write(json.dumps(config, indent=2) + '\n')
- except IOError, e:
+ except IOError:
+ e = get_exception()
module.fail_json(msg=str(e))
finally:
if stream:
@@ -316,6 +374,8 @@ def main():
'aggregate': {'type': 'bool'},
'low_flap_threshold': {'type': 'int'},
'high_flap_threshold': {'type': 'int'},
+ 'custom': {'type': 'dict'},
+ 'source': {'type': 'str'},
}
required_together = [['subdue_begin', 'subdue_end']]
@@ -336,4 +396,7 @@ def main():
module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
from ansible.module_utils.basic import *
-main()
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
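
The `custom` option added above merges arbitrary keys into a check while refusing to shadow standard options. A standalone sketch of that merge logic, with a reduced reserved set and hypothetical names (`merge_custom`, `RESERVED`):

RESERVED = set(['command', 'handlers', 'interval', 'type', 'subdue'])


def merge_custom(check, custom):
    changed = False
    reasons = []
    clash = set(custom) & RESERVED
    if clash:
        raise ValueError('cannot overwrite standard parameters: %s' % sorted(clash))
    for key, value in custom.items():
        if check.get(key) != value:
            changed = True
            reasons.append('custom param %r was added or changed' % key)
        check[key] = value
    return changed, reasons


check = {'command': 'check-cpu.rb', 'occurrences': 1}
print(merge_custom(check, {'occurrences': 3, 'playbook': 'wiki/cpu'}))
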
diff --git a/monitoring/sensu_subscription.py b/monitoring/sensu_subscription.py
new file mode 100644
index 00000000000..90535ad2d0b
--- /dev/null
+++ b/monitoring/sensu_subscription.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: sensu_subscription
+short_description: Manage Sensu subscriptions
+version_added: 2.2
+description:
+ - Manage which I(sensu channels) a machine should subscribe to
+options:
+ name:
+ description:
+ - The name of the channel
+ required: true
+ state:
+ description:
+ - Whether the machine should subscribe or unsubscribe from the channel
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ path:
+ description:
+ - Path to the subscriptions json file
+ required: false
+ default: /etc/sensu/conf.d/subscriptions.json
+ backup:
+ description:
+ - Create a backup file (if yes), including the timestamp information so you
+ can get the original file back if you somehow clobbered it incorrectly.
+ choices: [ 'yes', 'no' ]
+ required: false
+ default: no
+requirements: [ ]
+author: Anders Ingemann
+'''
+
+RETURN = '''
+reasons:
+ description: the reasons why the module changed or did not change something
+ returned: success
+ type: list
+ sample: ["channel subscription was absent and state is `present'"]
+'''
+
+EXAMPLES = '''
+# Subscribe to the nginx channel
+- name: subscribe to nginx checks
+ sensu_subscription:
+ name: nginx
+
+# Unsubscribe from the common checks channel
+- name: unsubscribe from common checks
+ sensu_subscription:
+ name: common
+ state: absent
+'''
+
+
+def sensu_subscription(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ try:
+ import json
+ except ImportError:
+ import simplejson as json
+
+ try:
+ config = json.load(open(path))
+ except IOError:
+ e = get_exception()
+ if e.errno == 2: # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=str(e))
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+
+ if 'client' not in config:
+ if state == 'absent':
+ reasons.append('`client\' did not exist and state is `absent\'')
+ return changed, reasons
+ config['client'] = {}
+ changed = True
+ reasons.append('`client\' did not exist')
+
+ if 'subscriptions' not in config['client']:
+ if state == 'absent':
+ reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
+ return changed, reasons
+ config['client']['subscriptions'] = []
+ changed = True
+ reasons.append('`client.subscriptions\' did not exist')
+
+ if name not in config['client']['subscriptions']:
+ if state == 'absent':
+ reasons.append('channel subscription was absent')
+ return changed, reasons
+ config['client']['subscriptions'].append(name)
+ changed = True
+ reasons.append('channel subscription was absent and state is `present\'')
+ else:
+ if state == 'absent':
+ config['client']['subscriptions'].remove(name)
+ changed = True
+ reasons.append('channel subscription was present and state is `absent\'')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+ open(path, 'w').write(json.dumps(config, indent=2) + '\n')
+ except IOError:
+ e = get_exception()
+ module.fail_json(msg='Failed to write to file %s: %s' % (path, str(e)))
+
+ return changed, reasons
+
+
+def main():
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ 'backup': {'type': 'bool', 'default': 'no'},
+ }
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ supports_check_mode=True)
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_subscription(module, path, name, state, backup)
+
+ module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons)
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
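
The new module's core is a small idempotent edit of the client's subscriptions list. A sketch of that behavior under the assumed file layout (a JSON document with a client.subscriptions array); `set_subscription` is an illustrative name:

import json


def set_subscription(config, name, state='present'):
    subs = config.setdefault('client', {}).setdefault('subscriptions', [])
    if state == 'present' and name not in subs:
        subs.append(name)
        return True
    if state == 'absent' and name in subs:
        subs.remove(name)
        return True
    return False


config = {}
print(set_subscription(config, 'nginx'))  # True: added
print(set_subscription(config, 'nginx'))  # False: already present
print(json.dumps(config, indent=2))
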
diff --git a/monitoring/stackdriver.py b/monitoring/stackdriver.py
index 7b3688cbefc..b20b1911588 100644
--- a/monitoring/stackdriver.py
+++ b/monitoring/stackdriver.py
@@ -15,6 +15,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
module: stackdriver
@@ -84,18 +88,40 @@
'''
EXAMPLES = '''
-- stackdriver: key=AAAAAA event=deploy deployed_to=production deployed_by=leeroyjenkins repository=MyWebApp revision_id=abcd123
-
-- stackdriver: key=AAAAAA event=annotation msg="Greetings from Ansible" annotated_by=leeroyjenkins level=WARN instance_id=i-abcd1234
+- stackdriver:
+ key: AAAAAA
+ event: deploy
+ deployed_to: production
+ deployed_by: leeroyjenkins
+ repository: MyWebApp
+ revision_id: abcd123
+
+- stackdriver:
+ key: AAAAAA
+ event: annotation
+ msg: Greetings from Ansible
+ annotated_by: leeroyjenkins
+ level: WARN
+ instance_id: i-abcd1234
'''
# ===========================================
# Stackdriver module specific support methods.
#
+
try:
- import json
+ import json
except ImportError:
- import simplejson as json
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import fetch_url
+
def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None):
"""Send a deploy event to Stackdriver"""
@@ -189,7 +215,8 @@ def main():
module.fail_json(msg="revision_id required for deploy events")
try:
send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg="unable to sent deploy event: %s" % e)
if event == 'annotation':
@@ -197,14 +224,13 @@ def main():
module.fail_json(msg="msg required for annotation events")
try:
send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg="unable to sent annotation event: %s" % e)
changed = True
module.exit_json(changed=changed, deployed_by=deployed_by)
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-main()
+if __name__ == '__main__':
+ main()
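
The guarded json import that several of these hunks introduce is the same pattern everywhere: prefer the stdlib module, fall back to simplejson on old Python 2.5 hosts, and otherwise let module_utils/basic.py report the missing dependency. A sketch (here the fallback is made explicit with a None sentinel):

try:
    import json
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        json = None  # AnsibleModule will report the missing dependency

if json is not None:
    print(json.dumps({'ok': True}))
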
diff --git a/monitoring/statusio_maintenance.py b/monitoring/statusio_maintenance.py
new file mode 100644
index 00000000000..5533e454713
--- /dev/null
+++ b/monitoring/statusio_maintenance.py
@@ -0,0 +1,484 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Benjamin Copeland (@bhcopeland)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+
+module: statusio_maintenance
+short_description: Create maintenance windows for your status.io dashboard
+description:
+ - Creates a maintenance window for status.io
+ - Deletes a maintenance window for status.io
+notes:
+ - You can use the apiary API url (http://docs.statusio.apiary.io/) to
+ capture API traffic
+ - Use start_date and start_time with minutes to set a future maintenance window
+version_added: "2.2"
+author: Benjamin Copeland (@bhcopeland)
+options:
+ title:
+ description:
+ - A descriptive title for the maintenance window
+ required: false
+ default: "A new maintenance window"
+ desc:
+ description:
+ - Message describing the maintenance window
+ required: false
+ default: "Created by Ansible"
+ state:
+ description:
+ - Desired state of the maintenance window.
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+ api_id:
+ description:
+ - Your unique API ID from status.io
+ required: true
+ api_key:
+ description:
+ - Your unique API Key from status.io
+ required: true
+ statuspage:
+ description:
+ - Your unique StatusPage ID from status.io
+ required: true
+ url:
+ description:
+ - Status.io API URL. A private apiary can be used instead.
+ required: false
+ default: "https://api.status.io"
+ components:
+ description:
+ - The given name of your component (server name)
+ required: false
+ aliases: ['component']
+ default: None
+ containers:
+ description:
+ - The given name of your container (data center)
+ required: false
+ aliases: ['container']
+ default: None
+ all_infrastructure_affected:
+ description:
+ - If it affects all components and containers
+ required: false
+ default: false
+ automation:
+ description:
+ - Automatically start and end the maintenance window
+ required: false
+ default: false
+ maintenance_notify_now:
+ description:
+ - Notify subscribers now
+ required: false
+ default: false
+ maintenance_notify_72_hr:
+ description:
+ - Notify subscribers 72 hours before maintenance start time
+ required: false
+ default: false
+ maintenance_notify_24_hr:
+ description:
+ - Notify subscribers 24 hours before maintenance start time
+ required: false
+ default: false
+ maintenance_notify_1_hr:
+ description:
+ - Notify subscribers 1 hour before maintenance start time
+ required: false
+ default: false
+ maintenance_id:
+ description:
+ - The maintenance id number when deleting a maintenance window
+ required: false
+ default: None
+ minutes:
+ description:
+ - The length of time in UTC that the maintenance will run
+ (starting from playbook runtime)
+ required: false
+ default: 10
+ start_date:
+ description:
+ - Date maintenance is expected to start (Month/Day/Year) (UTC)
+ - End Date is worked out from start_date + minutes
+ required: false
+ default: None
+ start_time:
+ description:
+ - Time maintenance is expected to start (Hour:Minutes) (UTC)
+ - End Time is worked out from start_time + minutes
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+# Create a maintenance window for 10 minutes on server1.example.com, with
+# automation to stop the maintenance.
+- statusio_maintenance:
+ title: "Router Upgrade from ansible"
+ desc: "Performing a Router Upgrade"
+ components: "server1.example.com"
+ api_id: "api_id"
+ api_key: "api_key"
+ statuspage: "statuspage_id"
+ maintenance_notify_1_hr: true
+ automation: true
+
+# Create a maintenance window for 60 minutes on multiple hosts
+- name: "Create maintenance window for server1 and server2"
+ local_action:
+ module: statusio_maintenance
+ title: "Routine maintenance"
+ desc: "Some security updates"
+ components:
+ - "server1.example.com"
+ - "server2.example.com"
+ minutes: "60"
+ api_id: "api_id"
+ api_key: "api_key"
+ statuspage: "statuspage_id"
+ maintenance_notify_1_hr: true
+ automation: true
+
+# Create a future maintenance window for 24 hours to all hosts inside the
+# Primary Data Center
+- statusio_maintenance:
+ title: Data center downtime
+ desc: Performing an upgrade to our data center
+ components: "Primary Data Center"
+ api_id: "api_id"
+ api_key: "api_key"
+ statuspage: "statuspage_id"
+ start_date: "01/01/2016"
+ start_time: "12:00"
+ minutes: 1440
+
+# Delete a maintenance window
+- statusio_maintenance:
+ title: "Remove a maintenance window"
+ maintenance_id: "561f90faf74bc94a4700087b"
+ statuspage: "statuspage_id"
+ api_id: "api_id"
+ api_key: "api_key"
+ state: absent
+
+'''
+# TODO: Add RETURN documentation.
+RETURN = ''' # '''
+
+import datetime
+
+
+def get_api_auth_headers(api_id, api_key, url, statuspage):
+
+ headers = {
+ "x-api-id": api_id,
+ "x-api-key": api_key,
+ "Content-Type": "application/json"
+ }
+
+ try:
+ response = open_url(
+ url + "/v2/component/list/" + statuspage, headers=headers)
+ data = json.loads(response.read())
+ if data['status']['message'] == 'Authentication failed':
+ return 1, None, None, "Authentication failed: " \
+ "Check api_id/api_key and statuspage id."
+ else:
+ auth_headers = headers
+ auth_content = data
+ except Exception:
+ e = get_exception()
+ return 1, None, None, str(e)
+ return 0, auth_headers, auth_content, None
+
+
+def get_component_ids(auth_content, components):
+ host_ids = []
+ lower_components = [x.lower() for x in components]
+ for result in auth_content["result"]:
+ if result['name'].lower() in lower_components:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_components.remove(result['name'].lower())
+ if len(lower_components):
+ # items not found in the api
+ return 1, None, lower_components
+ return 0, host_ids, None
+
+
+def get_container_ids(auth_content, containers):
+ host_ids = []
+ lower_containers = [x.lower() for x in containers]
+ for result in auth_content["result"]:
+ if result["containers"][0]["name"].lower() in lower_containers:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_containers.remove(result["containers"][0]["name"].lower())
+
+ if len(lower_containers):
+ # items not found in the api
+ return 1, None, lower_containers
+ return 0, host_ids, None
+
+
+def get_date_time(start_date, start_time, minutes):
+ returned_date = []
+ if start_date and start_time:
+ try:
+ datetime.datetime.strptime(start_date, '%m/%d/%Y')
+ returned_date.append(start_date)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_date format."
+ try:
+ datetime.datetime.strptime(start_time, '%H:%M')
+ returned_date.append(start_time)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_time format."
+ try:
+ # Work out end date/time based on minutes
+ date_time_start = datetime.datetime.strptime(
+ start_time + start_date, '%H:%M%m/%d/%Y')
+ delta = date_time_start + datetime.timedelta(minutes=minutes)
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ except (NameError, ValueError):
+ return 1, None, "Couldn't work out a valid date"
+ else:
+ now = datetime.datetime.utcnow()
+ delta = now + datetime.timedelta(minutes=minutes)
+ # start_date
+ returned_date.append(now.strftime("%m/%d/%Y"))
+ returned_date.append(now.strftime("%H:%M"))
+ # end_date
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ return 0, returned_date, None
+
+
+def create_maintenance(auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation, title, desc,
+ returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr):
+ returned_dates = [[x] for x in returned_date]
+ component_id = []
+ container_id = []
+ for val in host_ids:
+ component_id.append(val['component_id'])
+ container_id.append(val['container_id'])
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "components": component_id,
+ "containers": container_id,
+ "all_infrastructure_affected":
+ str(int(all_infrastructure_affected)),
+ "automation": str(int(automation)),
+ "maintenance_name": title,
+ "maintenance_details": desc,
+ "date_planned_start": returned_dates[0],
+ "time_planned_start": returned_dates[1],
+ "date_planned_end": returned_dates[2],
+ "time_planned_end": returned_dates[3],
+ "maintenance_notify_now": str(int(maintenance_notify_now)),
+ "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)),
+ "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)),
+ "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr))
+ })
+ response = open_url(
+ url + "/v2/maintenance/schedule", data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+
+ if data["status"]["error"] == "yes":
+ return 1, None, data["status"]["message"]
+ except Exception:
+ e = get_exception()
+ return 1, None, str(e)
+ return 0, None, None
+
+
+def delete_maintenance(auth_headers, url, statuspage, maintenance_id):
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "maintenance_id": maintenance_id,
+ })
+ response = open_url(
+ url=url + "/v2/maintenance/delete",
+ data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+ if data["status"]["error"] == "yes":
+ return 1, None, "Invalid maintenance_id"
+ except Exception:
+ e = get_exception()
+ return 1, None, str(e)
+ return 0, None, None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_id=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ statuspage=dict(required=True),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ url=dict(default='https://api.status.io', required=False),
+ components=dict(type='list', required=False, default=None,
+ aliases=['component']),
+ containers=dict(type='list', required=False, default=None,
+ aliases=['container']),
+ all_infrastructure_affected=dict(type='bool', default=False,
+ required=False),
+ automation=dict(type='bool', default=False, required=False),
+ title=dict(required=False, default='A new maintenance window'),
+ desc=dict(required=False, default='Created by Ansible'),
+ minutes=dict(type='int', required=False, default=10),
+ maintenance_notify_now=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_72_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_24_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_1_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_id=dict(required=False, default=None),
+ start_date=dict(default=None, required=False),
+ start_time=dict(default=None, required=False)
+ ),
+ supports_check_mode=True,
+ )
+
+ api_id = module.params['api_id']
+ api_key = module.params['api_key']
+ statuspage = module.params['statuspage']
+ state = module.params['state']
+ url = module.params['url']
+ components = module.params['components']
+ containers = module.params['containers']
+ all_infrastructure_affected = module.params['all_infrastructure_affected']
+ automation = module.params['automation']
+ title = module.params['title']
+ desc = module.params['desc']
+ minutes = module.params['minutes']
+ maintenance_notify_now = module.params['maintenance_notify_now']
+ maintenance_notify_72_hr = module.params['maintenance_notify_72_hr']
+ maintenance_notify_24_hr = module.params['maintenance_notify_24_hr']
+ maintenance_notify_1_hr = module.params['maintenance_notify_1_hr']
+ maintenance_id = module.params['maintenance_id']
+ start_date = module.params['start_date']
+ start_time = module.params['start_time']
+
+ if state == "present":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+ auth_content = {}
+
+ if minutes or (start_time and start_date):
+ (rc, returned_date, error) = get_date_time(
+ start_date, start_time, minutes)
+ if rc != 0:
+ module.fail_json(msg="Failed to set date/time: %s" % error)
+
+ if not components and not containers:
+ return module.fail_json(msg="A Component or Container must be "
+ "defined")
+ elif components and containers:
+ return module.fail_json(msg="Components and containers cannot "
+ "be used together")
+ else:
+ if components:
+ (rc, host_ids, error) = get_component_ids(auth_content,
+ components)
+ if rc != 0:
+ module.fail_json(msg="Failed to find component %s" % error)
+
+ if containers:
+ (rc, host_ids, error) = get_container_ids(auth_content,
+ containers)
+ if rc != 0:
+ module.fail_json(msg="Failed to find container %s" % error)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, _, error) = create_maintenance(
+ auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation,
+ title, desc, returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr)
+ if rc == 0:
+ module.exit_json(changed=True, result="Successfully created "
+ "maintenance")
+ else:
+ module.fail_json(msg="Failed to create maintenance: %s"
+ % error)
+
+ if state == "absent":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, _, error) = delete_maintenance(
+ auth_headers, url, statuspage, maintenance_id)
+ if rc == 0:
+ module.exit_json(
+ changed=True,
+ result="Successfully deleted maintenance"
+ )
+ else:
+ module.fail_json(
+ msg="Failed to delete maintenance: %s" % error)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
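
The date handling in get_date_time above boils down to deriving an end date/time from a start plus a duration, in the formats status.io expects. A standalone sketch of that window math (`window` is an illustrative name):

import datetime


def window(start_date, start_time, minutes):
    start = datetime.datetime.strptime(start_time + start_date, '%H:%M%m/%d/%Y')
    end = start + datetime.timedelta(minutes=minutes)
    return [start.strftime('%m/%d/%Y'), start.strftime('%H:%M'),
            end.strftime('%m/%d/%Y'), end.strftime('%H:%M')]


# A 1440-minute (24 h) window starting 01/01/2016 12:00 UTC
print(window('01/01/2016', '12:00', 1440))
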
diff --git a/monitoring/uptimerobot.py b/monitoring/uptimerobot.py
index bdff8f1f134..3a87c3838a6 100644
--- a/monitoring/uptimerobot.py
+++ b/monitoring/uptimerobot.py
@@ -15,6 +15,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
module: uptimerobot
@@ -53,18 +57,27 @@
EXAMPLES = '''
# Pause the monitor with an ID of 12345.
-- uptimerobot: monitorid=12345
- apikey=12345-1234512345
- state=paused
+- uptimerobot:
+ monitorid: 12345
+ apikey: 12345-1234512345
+ state: paused
# Start the monitor with an ID of 12345.
-- uptimerobot: monitorid=12345
- apikey=12345-1234512345
- state=started
-
+- uptimerobot:
+ monitorid: 12345
+ apikey: 12345-1234512345
+ state: started
'''
-import json
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
import urllib
import time
diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py
index 4aad1218789..ff90db01bea 100644
--- a/monitoring/zabbix_group.py
+++ b/monitoring/zabbix_group.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: zabbix_group
@@ -49,6 +53,18 @@
description:
- Zabbix user password.
required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
state:
description:
- Create or delete host group.
@@ -114,7 +130,7 @@ def create_host_group(self, group_names):
try:
group_add_list = []
for group_name in group_names:
- result = self._zapi.hostgroup.exists({'name': group_name})
+ result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
if not result:
try:
if self._module.check_mode:
@@ -124,7 +140,7 @@ def create_host_group(self, group_names):
except Already_Exists:
return group_add_list
return group_add_list
- except Exception, e:
+ except Exception as e:
self._module.fail_json(msg="Failed to create host group(s): %s" % e)
# delete host group(s)
@@ -133,7 +149,7 @@ def delete_host_group(self, group_ids):
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.hostgroup.delete(group_ids)
- except Exception, e:
+ except Exception as e:
self._module.fail_json(msg="Failed to delete host group(s), Exception: %s" % e)
# get group ids by name
@@ -150,10 +166,12 @@ def get_group_ids(self, host_groups):
def main():
module = AnsibleModule(
argument_spec=dict(
- server_url=dict(required=True, aliases=['url']),
- login_user=dict(required=True),
- login_password=dict(required=True, no_log=True),
- host_groups=dict(required=True, aliases=['host_group']),
+ server_url=dict(type='str', required=True, aliases=['url']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False, default=None, no_log=True),
+ host_groups=dict(type='list', required=True, aliases=['host_group']),
state=dict(default="present", choices=['present','absent']),
timeout=dict(type='int', default=10)
),
@@ -166,6 +184,8 @@ def main():
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
host_groups = module.params['host_groups']
state = module.params['state']
timeout = module.params['timeout']
@@ -174,9 +194,9 @@ def main():
# login to zabbix
try:
- zbx = ZabbixAPI(server_url, timeout=timeout)
+ zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
zbx.login(login_user, login_password)
- except Exception, e:
+ except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
hostGroup = HostGroup(module, zbx)
@@ -206,4 +226,6 @@ def main():
module.exit_json(changed=False)
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
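
The hostgroup.exists() call this hunk retires was removed from newer Zabbix API versions; existence is now inferred from a filtered hostgroup.get(), which returns an empty list on no match. A sketch of the call shape against a fake client (FakeHostgroups is purely illustrative):

class FakeHostgroups(object):
    def __init__(self, groups):
        self.groups = groups

    def get(self, params):
        name = params['filter']['name']
        return [g for g in self.groups if g['name'] == name]


def host_group_exists(hostgroup_api, group_name):
    # get() with a name filter replaces the retired exists() call
    return bool(hostgroup_api.get({'filter': {'name': group_name}}))


api = FakeHostgroups([{'name': 'Linux servers', 'groupid': '2'}])
print(host_group_exists(api, 'Linux servers'))  # True
print(host_group_exists(api, 'Nonexistent'))    # False
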
diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py
index 6fac82c7177..aa113efe508 100644
--- a/monitoring/zabbix_host.py
+++ b/monitoring/zabbix_host.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: zabbix_host
@@ -26,7 +30,7 @@
description:
- This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
version_added: "2.0"
-author:
+author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
@@ -47,11 +51,28 @@
description:
- Zabbix user password.
required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
host_name:
description:
- Name of the host in Zabbix.
- host_name is the unique identifier used and cannot be updated using this module.
required: true
+ visible_name:
+ description:
+ - Visible name of the host in Zabbix.
+ required: false
+ version_added: '2.3'
host_groups:
description:
- List of host groups the host is part of.
@@ -61,6 +82,13 @@
- List of templates linked to the host.
required: false
default: None
+ inventory_mode:
+ description:
+ - Configure the inventory mode.
+ choices: ['automatic', 'manual', 'disabled']
+ required: false
+ default: None
+ version_added: '2.1'
status:
description:
- Monitoring status of the host.
@@ -91,6 +119,13 @@
- 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface'
required: false
default: []
+ force:
+ description:
+ - Overwrite the host configuration, even if already present
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ version_added: "2.0"
'''
EXAMPLES = '''
@@ -101,6 +136,7 @@
login_user: username
login_password: password
host_name: ExampleHost
+ visible_name: ExampleName
host_groups:
- Example group1
- Example group2
@@ -109,6 +145,7 @@
- Example template2
status: enabled
state: present
+ inventory_mode: automatic
interfaces:
- type: 1
main: 1
@@ -143,8 +180,8 @@
class ZabbixAPIExtends(ZabbixAPI):
hostinterface = None
- def __init__(self, server, timeout, **kwargs):
- ZabbixAPI.__init__(self, server, timeout=timeout)
+ def __init__(self, server, timeout, user, passwd, **kwargs):
+ ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))
@@ -155,13 +192,13 @@ def __init__(self, module, zbx):
# exist host
def is_host_exist(self, host_name):
- result = self._zapi.host.exists({'host': host_name})
+ result = self._zapi.host.get({'filter': {'host': host_name}})
return result
# check if host group exists
def check_host_group_exist(self, group_names):
for group_name in group_names:
- result = self._zapi.hostgroup.exists({'name': group_name})
+ result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
if not result:
self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
return True
@@ -179,24 +216,30 @@ def get_template_ids(self, template_list):
template_ids.append(template_id)
return template_ids
- def add_host(self, host_name, group_ids, status, interfaces, proxy_id):
+ def add_host(self, host_name, group_ids, status, interfaces, proxy_id, visible_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}
if proxy_id:
parameters['proxy_hostid'] = proxy_id
+ if visible_name:
+ parameters['name'] = visible_name
host_list = self._zapi.host.create(parameters)
if len(host_list) >= 1:
return host_list['hostids'][0]
- except Exception, e:
+ except Exception as e:
self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))
- def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id):
+ def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id, visible_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
- parameters = {'hostid': host_id, 'groups': group_ids, 'status': status, 'proxy_hostid': proxy_id}
+ parameters = {'hostid': host_id, 'groups': group_ids, 'status': status}
+ if proxy_id:
+ parameters['proxy_hostid'] = proxy_id
+ if visible_name:
+ parameters['name'] = visible_name
self._zapi.host.update(parameters)
interface_list_copy = exist_interface_list
if interfaces:
@@ -224,15 +267,15 @@ def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_i
remove_interface_ids.append(interface_id)
if len(remove_interface_ids) > 0:
self._zapi.hostinterface.delete(remove_interface_ids)
- except Exception, e:
+ except Exception as e:
self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))
def delete_host(self, host_id, host_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
- self._zapi.host.delete({'hostid': host_id})
- except Exception, e:
+ self._zapi.host.delete([host_id])
+ except Exception as e:
self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))
# get host by host name
@@ -282,9 +325,11 @@ def get_host_groups_by_host_id(self, host_id):
# check the exist_interfaces whether it equals the interfaces or not
def check_interface_properties(self, exist_interface_list, interfaces):
interfaces_port_list = []
- if len(interfaces) >= 1:
- for interface in interfaces:
- interfaces_port_list.append(int(interface['port']))
+
+ if interfaces is not None:
+ if len(interfaces) >= 1:
+ for interface in interfaces:
+ interfaces_port_list.append(int(interface['port']))
exist_interface_ports = []
if len(exist_interface_list) >= 1:
@@ -311,7 +356,7 @@ def get_host_status_by_host(self, host):
# check all the properties before link or clear template
def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
- exist_interfaces, host, proxy_id):
+ exist_interfaces, host, proxy_id, visible_name):
# get the existing host's groups
exist_host_groups = self.get_host_groups_by_host_id(host_id)
if set(host_groups) != set(exist_host_groups):
@@ -333,7 +378,10 @@ def check_all_properties(self, host_id, host_groups, status, interfaces, templat
if host['proxy_hostid'] != proxy_id:
return True
-
+
+ if host['name'] != visible_name:
+ return True
+
return False
# link or clear template of the host
@@ -353,24 +401,52 @@ def link_or_clear_template(self, host_id, template_id_list):
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.host.update(request_str)
- except Exception, e:
+ except Exception as e:
self._module.fail_json(msg="Failed to link template to host: %s" % e)
+ # Update the host inventory_mode
+ def update_inventory_mode(self, host_id, inventory_mode):
+
+ # nothing was set, do nothing
+ if not inventory_mode:
+ return
+
+ if inventory_mode == "automatic":
+ inventory_mode = 1
+ elif inventory_mode == "manual":
+ inventory_mode = 0
+ elif inventory_mode == "disabled":
+ inventory_mode = -1
+
+ # watch for - https://support.zabbix.com/browse/ZBX-6033
+ request_str = {'hostid': host_id, 'inventory_mode': inventory_mode}
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.host.update(request_str)
+ except Exception as e:
+ self._module.fail_json(msg="Failed to set inventory_mode to host: %s" % e)
def main():
module = AnsibleModule(
argument_spec=dict(
- server_url=dict(required=True, aliases=['url']),
- login_user=dict(required=True),
- login_password=dict(required=True, no_log=True),
- host_name=dict(required=True),
- host_groups=dict(required=False),
- link_templates=dict(required=False),
+ server_url=dict(type='str', required=True, aliases=['url']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ host_name=dict(type='str', required=True),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False, default=None, no_log=True),
+ host_groups=dict(type='list', required=False),
+ link_templates=dict(type='list', required=False),
status=dict(default="enabled", choices=['enabled', 'disabled']),
state=dict(default="present", choices=['present', 'absent']),
+ inventory_mode=dict(required=False, choices=['automatic', 'manual', 'disabled']),
timeout=dict(type='int', default=10),
- interfaces=dict(required=False),
- proxy=dict(required=False)
+ interfaces=dict(type='list', required=False),
+ force=dict(type='bool', default=True),
+ proxy=dict(type='str', required=False),
+ visible_name=dict(type='str', required=False)
+
),
supports_check_mode=True
)
@@ -381,13 +457,18 @@ def main():
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
host_name = module.params['host_name']
+ visible_name = module.params['visible_name']
host_groups = module.params['host_groups']
link_templates = module.params['link_templates']
+ inventory_mode = module.params['inventory_mode']
status = module.params['status']
state = module.params['state']
timeout = module.params['timeout']
interfaces = module.params['interfaces']
+ force = module.params['force']
proxy = module.params['proxy']
# convert enabled to 0; disabled to 1
@@ -396,9 +477,9 @@ def main():
zbx = None
# login to zabbix
try:
- zbx = ZabbixAPIExtends(server_url, timeout=timeout)
+ zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
zbx.login(login_user, login_password)
- except Exception, e:
+ except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
host = Host(module, zbx)
@@ -418,15 +499,16 @@ def main():
if interface['type'] == 1:
ip = interface['ip']
- proxy_id = "0"
-
- if proxy:
- proxy_id = host.get_proxyid_by_proxy_name(proxy)
-
# check if host exist
is_host_exist = host.is_host_exist(host_name)
if is_host_exist:
+ # Use proxy specified, or set to None when updating host
+ if proxy:
+ proxy_id = host.get_proxyid_by_proxy_name(proxy)
+ else:
+ proxy_id = None
+
# get host id by host name
zabbix_host_obj = host.get_host_by_host_name(host_name)
host_id = zabbix_host_obj['hostid']
@@ -439,6 +521,9 @@ def main():
if not group_ids:
module.fail_json(msg="Specify at least one group for updating host '%s'." % host_name)
+ if not force:
+ module.fail_json(changed=False, result="Host present, Can't update configuration without force")
+
# get exist host's interfaces
exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
exist_interfaces_copy = copy.deepcopy(exist_interfaces)
@@ -448,10 +533,10 @@ def main():
if len(exist_interfaces) > interfaces_len:
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
- exist_interfaces, zabbix_host_obj, proxy_id):
+ exist_interfaces, zabbix_host_obj, proxy_id, visible_name):
host.link_or_clear_template(host_id, template_ids)
host.update_host(host_name, group_ids, status, host_id,
- interfaces, exist_interfaces, proxy_id)
+ interfaces, exist_interfaces, proxy_id, visible_name)
module.exit_json(changed=True,
result="Successfully update host %s (%s) and linked with template '%s'"
% (host_name, ip, link_templates))
@@ -459,15 +544,26 @@ def main():
module.exit_json(changed=False)
else:
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
- exist_interfaces_copy, zabbix_host_obj, proxy_id):
- host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id)
+ exist_interfaces_copy, zabbix_host_obj, proxy_id, visible_name):
+ host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id, visible_name)
host.link_or_clear_template(host_id, template_ids)
+ host.update_inventory_mode(host_id, inventory_mode)
module.exit_json(changed=True,
result="Successfully update host %s (%s) and linked with template '%s'"
% (host_name, ip, link_templates))
else:
module.exit_json(changed=False)
else:
+ if state == "absent":
+ # the host is already deleted.
+ module.exit_json(changed=False)
+
+ # Use proxy specified, or set to 0 when adding new host
+ if proxy:
+ proxy_id = host.get_proxyid_by_proxy_name(proxy)
+ else:
+ proxy_id = 0
+
if not group_ids:
module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)
@@ -475,11 +571,13 @@ def main():
module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
# create host
- host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id)
+ host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id, visible_name)
host.link_or_clear_template(host_id, template_ids)
+ host.update_inventory_mode(host_id, inventory_mode)
module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
host_name, ip, link_templates))
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
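
The inventory_mode support added above maps the module's human-readable choice onto the integer the Zabbix API expects (manual=0, automatic=1, disabled=-1; see the ZBX-6033 link in the hunk). A sketch of that translation with an illustrative helper name:

INVENTORY_MODES = {'manual': 0, 'automatic': 1, 'disabled': -1}


def inventory_mode_request(host_id, inventory_mode):
    if not inventory_mode:
        return None  # nothing requested, nothing to update
    return {'hostid': host_id, 'inventory_mode': INVENTORY_MODES[inventory_mode]}


print(inventory_mode_request('10105', 'automatic'))
# {'hostid': '10105', 'inventory_mode': 1}
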
diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py
index e8d65370760..75c552cf229 100644
--- a/monitoring/zabbix_hostmacro.py
+++ b/monitoring/zabbix_hostmacro.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: zabbix_hostmacro
@@ -46,6 +50,18 @@
description:
- Zabbix user password.
required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
host_name:
description:
- Name of the host.
@@ -80,8 +96,8 @@
login_user: username
login_password: password
host_name: ExampleHost
- macro_name:Example macro
- macro_value:Example value
+ macro_name: Example macro
+ macro_value: Example value
state: present
'''
@@ -99,8 +115,8 @@
# Extend the ZabbixAPI
# Since the zabbix-api python module too old (version 1.0, no higher version so far).
class ZabbixAPIExtends(ZabbixAPI):
- def __init__(self, server, timeout, **kwargs):
- ZabbixAPI.__init__(self, server, timeout=timeout)
+ def __init__(self, server, timeout, user, passwd, **kwargs):
+ ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
class HostMacro(object):
@@ -108,11 +124,6 @@ def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
- # exist host
- def is_host_exist(self, host_name):
- result = self._zapi.host.exists({'host': host_name})
- return result
-
# get host id by host name
def get_host_id(self, host_name):
try:
@@ -122,7 +133,7 @@ def get_host_id(self, host_name):
else:
host_id = host_list[0]['hostid']
return host_id
- except Exception, e:
+ except Exception as e:
self._module.fail_json(msg="Failed to get the host %s id: %s." % (host_name, e))
# get host macro
@@ -133,7 +144,7 @@ def get_host_macro(self, macro_name, host_id):
if len(host_macro_list) > 0:
return host_macro_list[0]
return None
- except Exception, e:
+ except Exception as e:
self._module.fail_json(msg="Failed to get host macro %s: %s" % (macro_name, e))
# create host macro
@@ -143,18 +154,20 @@ def create_host_macro(self, macro_name, macro_value, host_id):
self._module.exit_json(changed=True)
self._zapi.usermacro.create({'hostid': host_id, 'macro': '{$' + macro_name + '}', 'value': macro_value})
self._module.exit_json(changed=True, result="Successfully added host macro %s " % macro_name)
- except Exception, e:
+ except Exception as e:
self._module.fail_json(msg="Failed to create host macro %s: %s" % (macro_name, e))
# update host macro
def update_host_macro(self, host_macro_obj, macro_name, macro_value):
host_macro_id = host_macro_obj['hostmacroid']
+ if host_macro_obj['macro'] == '{$'+macro_name+'}' and host_macro_obj['value'] == macro_value:
+ self._module.exit_json(changed=False, result="Host macro %s already up to date" % macro_name)
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.usermacro.update({'hostmacroid': host_macro_id, 'value': macro_value})
self._module.exit_json(changed=True, result="Successfully updated host macro %s " % macro_name)
- except Exception, e:
+ except Exception as e:
self._module.fail_json(msg="Failed to updated host macro %s: %s" % (macro_name, e))
# delete host macro
@@ -165,18 +178,20 @@ def delete_host_macro(self, host_macro_obj, macro_name):
self._module.exit_json(changed=True)
self._zapi.usermacro.delete([host_macro_id])
self._module.exit_json(changed=True, result="Successfully deleted host macro %s " % macro_name)
- except Exception, e:
+ except Exception as e:
self._module.fail_json(msg="Failed to delete host macro %s: %s" % (macro_name, e))
def main():
module = AnsibleModule(
argument_spec=dict(
- server_url=dict(required=True, aliases=['url']),
- login_user=dict(required=True),
- login_password=dict(required=True, no_log=True),
- host_name=dict(required=True),
- macro_name=dict(required=True),
- macro_value=dict(required=True),
+ server_url=dict(type='str', required=True, aliases=['url']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False, default=None, no_log=True),
+ host_name=dict(type='str', required=True),
+ macro_name=dict(type='str', required=True),
+ macro_value=dict(type='str', required=True),
state=dict(default="present", choices=['present', 'absent']),
timeout=dict(type='int', default=10)
),
@@ -189,6 +204,8 @@ def main():
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
host_name = module.params['host_name']
macro_name = (module.params['macro_name']).upper()
macro_value = module.params['macro_value']
@@ -198,9 +215,9 @@ def main():
zbx = None
# login to zabbix
try:
- zbx = ZabbixAPIExtends(server_url, timeout=timeout)
+ zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
zbx.login(login_user, login_password)
- except Exception, e:
+ except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
host_macro_class_obj = HostMacro(module, zbx)
@@ -226,5 +243,6 @@ def main():
host_macro_class_obj.update_host_macro(host_macro_obj, macro_name, macro_value)
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
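
The early-exit check added to update_host_macro above is what makes the module idempotent: if the stored macro already carries the requested name and value, no update call is issued. A standalone sketch of that comparison:

def macro_up_to_date(existing, macro_name, macro_value):
    return (existing['macro'] == '{$' + macro_name + '}'
            and existing['value'] == macro_value)


current = {'macro': '{$SNMP_COMMUNITY}', 'value': 'public'}
print(macro_up_to_date(current, 'SNMP_COMMUNITY', 'public'))   # True: no change
print(macro_up_to_date(current, 'SNMP_COMMUNITY', 'private'))  # False: update needed
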
diff --git a/monitoring/zabbix_maintenance.py b/monitoring/zabbix_maintenance.py
index 2d611382919..4d4c1d972a2 100644
--- a/monitoring/zabbix_maintenance.py
+++ b/monitoring/zabbix_maintenance.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
module: zabbix_maintenance
@@ -52,6 +56,18 @@
description:
- Zabbix user password.
required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
host_names:
description:
- Hosts to manage maintenance window for.
@@ -91,6 +107,12 @@
- Type of maintenance. With data collection, or without.
required: false
default: "true"
+ timeout:
+ description:
+ - The timeout of API request (seconds).
+ default: 10
+ version_added: "2.1"
+ required: false
notes:
- Useful for setting hosts in maintenance mode before big update,
and removing maintenance window after update.
@@ -104,40 +126,48 @@
EXAMPLES = '''
# Create maintenance window named "Update of www1"
# for host www1.example.com for 90 minutes
-- zabbix_maintenance: name="Update of www1"
- host_name=www1.example.com
- state=present
- minutes=90
- server_url=https://monitoring.example.com
- login_user=ansible
- login_password=pAsSwOrD
+- zabbix_maintenance:
+ name: Update of www1
+ host_name: www1.example.com
+ state: present
+ minutes: 90
+ server_url: 'https://monitoring.example.com'
+ login_user: ansible
+ login_password: pAsSwOrD
# Create maintenance window named "Mass update"
# for host www1.example.com and host groups Office and Dev
-- zabbix_maintenance: name="Update of www1"
- host_name=www1.example.com
- host_groups=Office,Dev
- state=present
- server_url=https://monitoring.example.com
- login_user=ansible
- login_password=pAsSwOrD
+- zabbix_maintenance:
+ name: Update of www1
+ host_name: www1.example.com
+ host_groups:
+ - Office
+ - Dev
+ state: present
+ server_url: 'https://monitoring.example.com'
+ login_user: ansible
+ login_password: pAsSwOrD
# Create maintenance window named "update"
# for hosts www1.example.com and db1.example.com and without data collection.
-- zabbix_maintenance: name=update
- host_names=www1.example.com,db1.example.com
- state=present
- collect_data=false
- server_url=https://monitoring.example.com
- login_user=ansible
- login_password=pAsSwOrD
+- zabbix_maintenance:
+ name: update
+ host_names:
+ - www1.example.com
+ - db1.example.com
+ state: present
+ collect_data: false
+ server_url: 'https://monitoring.example.com'
+ login_user: ansible
+ login_password: pAsSwOrD
# Remove maintenance window named "Test1"
-- zabbix_maintenance: name=Test1
- state=absent
- server_url=https://monitoring.example.com
- login_user=ansible
- login_password=pAsSwOrD
+- zabbix_maintenance:
+ name: Test1
+ state: absent
+ server_url: 'https://monitoring.example.com'
+ login_user: ansible
+ login_password: pAsSwOrD
'''
import datetime
@@ -202,18 +232,6 @@ def delete_maintenance(zbx, maintenance_id):
return 0, None, None
-def check_maintenance(zbx, name):
- try:
- result = zbx.maintenance.exists(
- {
- "name": name
- }
- )
- except BaseException as e:
- return 1, None, str(e)
- return 0, result, None
-
-
def get_group_ids(zbx, host_groups):
group_ids = []
for group in host_groups:
@@ -266,15 +284,18 @@ def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=False, default='present', choices=['present', 'absent']),
- server_url=dict(required=True, default=None, aliases=['url']),
+ server_url=dict(type='str', required=True, aliases=['url']),
host_names=dict(type='list', required=False, default=None, aliases=['host_name']),
minutes=dict(type='int', required=False, default=10),
host_groups=dict(type='list', required=False, default=None, aliases=['host_group']),
- login_user=dict(required=True),
- login_password=dict(required=True, no_log=True),
- name=dict(required=True),
- desc=dict(required=False, default="Created by Ansible"),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False, default=None, no_log=True),
+ name=dict(type='str', required=True),
+ desc=dict(type='str', required=False, default="Created by Ansible"),
collect_data=dict(type='bool', required=False, default=True),
+ timeout=dict(type='int', default=10),
),
supports_check_mode=True,
)
@@ -287,18 +308,22 @@ def main():
state = module.params['state']
login_user = module.params['login_user']
login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
minutes = module.params['minutes']
name = module.params['name']
desc = module.params['desc']
server_url = module.params['server_url']
collect_data = module.params['collect_data']
+ timeout = module.params['timeout']
+
if collect_data:
maintenance_type = 0
else:
maintenance_type = 1
try:
- zbx = ZabbixAPI(server_url)
+ zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
zbx.login(login_user, login_password)
except BaseException as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
@@ -325,11 +350,11 @@ def main():
else:
host_ids = []
- (rc, exists, error) = check_maintenance(zbx, name)
+ (rc, maintenance, error) = get_maintenance_id(zbx, name)
if rc != 0:
module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error))
- if not exists:
+ if not maintenance:
if not host_names and not host_groups:
module.fail_json(msg="At least one host_name or host_group must be defined for each created maintenance.")
@@ -344,26 +369,23 @@ def main():
if state == "absent":
- (rc, exists, error) = check_maintenance(zbx, name)
+ (rc, maintenance, error) = get_maintenance_id(zbx, name)
if rc != 0:
module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error))
- if exists:
- (rc, maintenance, error) = get_maintenance_id(zbx, name)
- if rc != 0:
- module.fail_json(msg="Failed to get maintenance id: %s" % error)
-
- if maintenance:
- if module.check_mode:
+ if maintenance:
+ if module.check_mode:
+ changed = True
+ else:
+ (rc, _, error) = delete_maintenance(zbx, maintenance)
+ if rc == 0:
changed = True
else:
- (rc, _, error) = delete_maintenance(zbx, maintenance)
- if rc == 0:
- changed = True
- else:
- module.fail_json(msg="Failed to remove maintenance: %s" % error)
+ module.fail_json(msg="Failed to remove maintenance: %s" % error)
module.exit_json(changed=changed)
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
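
This hunk folds the separate maintenance.exists() probe into a single get_maintenance_id() lookup: a falsy result means the window is absent, otherwise the id can be deleted directly. A simplified sketch of that flow against a fake client (the module's real helpers return rc/error tuples):

class FakeMaintenanceAPI(object):
    def __init__(self, items):
        self.items = items

    def get(self, params):
        name = params['filter']['name']
        return [m for m in self.items if m['name'] == name]

    def delete(self, ids):
        self.items = [m for m in self.items if m['maintenanceid'] not in ids]


def ensure_absent(maintenance_api, name):
    found = maintenance_api.get({'filter': {'name': name}})
    if not found:
        return False  # already absent: no change
    maintenance_api.delete([m['maintenanceid'] for m in found])
    return True


api = FakeMaintenanceAPI([{'name': 'Test1', 'maintenanceid': '3'}])
print(ensure_absent(api, 'Test1'))  # True: deleted
print(ensure_absent(api, 'Test1'))  # False: already gone
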
diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py
index 1896899c3a3..7e0ade2abe7 100644
--- a/monitoring/zabbix_screen.py
+++ b/monitoring/zabbix_screen.py
@@ -20,6 +20,10 @@
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: zabbix_screen
@@ -48,6 +52,18 @@
description:
- Zabbix user password.
required: true
+ http_login_user:
+ description:
+ - Basic Auth login
+ required: false
+ default: None
+ version_added: "2.1"
+ http_login_password:
+ description:
+ - Basic Auth password
+ required: false
+ default: None
+ version_added: "2.1"
timeout:
description:
- The timeout of API request (seconds).
@@ -142,8 +158,8 @@
class ZabbixAPIExtends(ZabbixAPI):
screenitem = None
- def __init__(self, server, timeout, **kwargs):
- ZabbixAPI.__init__(self, server, timeout=timeout)
+ def __init__(self, server, timeout, user, passwd, **kwargs):
+ ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
self.screenitem = ZabbixAPISubClass(self, dict({"prefix": "screenitem"}, **kwargs))
@@ -315,9 +331,11 @@ def create_screen_items(self, screen_id, hosts, graph_name_list, width, height,
def main():
module = AnsibleModule(
argument_spec=dict(
- server_url=dict(required=True, aliases=['url']),
- login_user=dict(required=True),
- login_password=dict(required=True, no_log=True),
+ server_url=dict(type='str', required=True, aliases=['url']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False, default=None, no_log=True),
timeout=dict(type='int', default=10),
screens=dict(type='list', required=True)
),
@@ -330,15 +348,17 @@ def main():
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
timeout = module.params['timeout']
screens = module.params['screens']
zbx = None
# login to zabbix
try:
- zbx = ZabbixAPIExtends(server_url, timeout=timeout)
+ zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
zbx.login(login_user, login_password)
- except Exception, e:
+ except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
screen = Screen(module, zbx)
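
As in zabbix_maintenance above, the new http_login_user/http_login_password parameters are threaded straight into the ZabbixAPI constructor for HTTP basic auth, separate from the application-level login. A sketch of the call order, assuming the third-party zabbix-api package with the constructor signature used above; all values are illustrative:

# Sketch only: requires the third-party zabbix-api package.
from zabbix_api import ZabbixAPI

zbx = ZabbixAPI("https://zabbix.example.com", timeout=10,
                user="basic-auth-user",      # HTTP basic auth; may be None
                passwd="basic-auth-pass")
zbx.login("api-user", "api-pass")            # application-level Zabbix login
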
diff --git a/network/a10/a10_server.py b/network/a10/a10_server.py
index 2ad66c23588..3a298cb25f4 100644
--- a/network/a10/a10_server.py
+++ b/network/a10/a10_server.py
@@ -3,7 +3,8 @@
"""
Ansible module to manage A10 Networks slb server objects
-(c) 2014, Mischa Peters
+(c) 2014, Mischa Peters,
+2016, Eric Chou
This file is part of Ansible
@@ -21,57 +22,44 @@
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: a10_server
version_added: 1.8
-short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
+short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' server object.
description:
- - Manage slb server objects on A10 Networks devices via aXAPI
-author: "Mischa Peters (@mischapeters)"
+ - Manage SLB (Server Load Balancer) server objects on A10 Networks devices via aXAPIv2.
+author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014"
notes:
- - Requires A10 Networks aXAPI 2.1
+ - Requires A10 Networks aXAPI 2.1.
+extends_documentation_fragment: a10
options:
- host:
- description:
- - hostname or ip of your A10 Networks device
- required: true
- default: null
- aliases: []
- choices: []
- username:
- description:
- - admin account of your A10 Networks device
- required: true
- default: null
- aliases: ['user', 'admin']
- choices: []
- password:
+ partition:
+ version_added: "2.3"
description:
- - admin password of your A10 Networks device
- required: true
+ - Set the active partition on the device.
+ required: false
default: null
- aliases: ['pass', 'pwd']
- choices: []
server_name:
description:
- - slb server name
+ - The SLB (Server Load Balancer) server name.
required: true
- default: null
aliases: ['server']
- choices: []
server_ip:
description:
- - slb server IP address
+ - The SLB server IPv4 address.
required: false
default: null
aliases: ['ip', 'address']
- choices: []
server_status:
description:
- - slb virtual server status
+ - The SLB virtual server status.
required: false
- default: enable
+ default: enabled
aliases: ['status']
choices: ['enabled', 'disabled']
server_ports:
@@ -82,15 +70,25 @@
required when C(state) is C(present).
required: false
default: null
- aliases: []
- choices: []
state:
description:
- - create, update or remove slb server
+ - This is to specify the operation to create, update or remove SLB server.
required: false
default: present
- aliases: []
choices: ['present', 'absent']
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled devices using self-signed certificates.
+ required: false
+ version_added: 2.3
+ default: 'yes'
+ choices: ['yes', 'no']
+
'''
EXAMPLES = '''
@@ -99,6 +97,7 @@
host: a10.mydomain.com
username: myadmin
password: mypassword
+ partition: mypartition
server: test
server_ip: 1.1.1.100
server_ports:
@@ -109,6 +108,15 @@
'''
+RETURN = '''
+content:
+ description: the full info regarding the slb_server
+ returned: success
+ type: string
+ sample: "mynewserver"
+'''
+
+
VALID_PORT_FIELDS = ['port_num', 'protocol', 'status']
def validate_ports(module, ports):
@@ -154,6 +162,7 @@ def main():
server_ip=dict(type='str', aliases=['ip', 'address']),
server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
server_ports=dict(type='list', aliases=['port'], default=[]),
+ partition=dict(type='str', default=None),
)
)
@@ -163,6 +172,7 @@ def main():
)
host = module.params['host']
+ partition = module.params['partition']
username = module.params['username']
password = module.params['password']
state = module.params['state']
@@ -197,6 +207,8 @@ def main():
if slb_server_status:
json_post['server']['status'] = axapi_enabled_disabled(slb_server_status)
+ slb_server_partition = axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
+
slb_server_data = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server}))
slb_server_exists = not axapi_failure(slb_server_data)
@@ -270,8 +282,8 @@ def status_needs_update(current_status, new_status):
else:
result = dict(msg="the server was not present")
- # if the config has changed, or we want to force a save, save the config unless otherwise requested
- if changed or write_config:
+ # if the config has changed, save the config unless otherwise requested
+ if changed and write_config:
write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
if axapi_failure(write_result):
module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
@@ -280,9 +292,12 @@ def status_needs_update(current_status, new_status):
axapi_call(module, session_url + '&method=session.close')
module.exit_json(changed=changed, content=result)
-# standard ansible module imports
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.a10 import *
+# ansible module imports
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import url_argument_spec
+from ansible.module_utils.a10 import axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, axapi_get_port_protocol, axapi_enabled_disabled
+
-main()
+if __name__ == '__main__':
+ main()
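
The write_memory guard also flips from 'or' to 'and': the running config is now persisted only when the module both changed something and was asked to write. A quick truth-table sketch of the two guards:

# When does a save happen under each guard?
for changed in (False, True):
    for write_config in (False, True):
        print(changed, write_config,
              "old:", changed or write_config,
              "new:", changed and write_config)
# Only changed=True with write_config=True saves under the new guard.
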
diff --git a/network/a10/a10_server_axapi3.py b/network/a10/a10_server_axapi3.py
new file mode 100644
index 00000000000..46f7bf05746
--- /dev/null
+++ b/network/a10/a10_server_axapi3.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to manage A10 Networks slb server objects
+(c) 2014, Mischa Peters, 2016, Eric Chou
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: a10_server_axapi3
+version_added: 2.3
+short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
+description:
+ - Manage SLB (Server Load Balancer) server objects on A10 Networks devices via aXAPIv3.
+author: "Eric Chou (@ericchou) based on previous work by Mischa Peters (@mischapeters)"
+extends_documentation_fragment: a10
+options:
+ server_name:
+ description:
+ - The SLB (Server Load Balancer) server name.
+ required: true
+ aliases: ['server']
+ server_ip:
+ description:
+ - The SLB (Server Load Balancer) server IPv4 address.
+ required: true
+ aliases: ['ip', 'address']
+ server_status:
+ description:
+ - The SLB (Server Load Balancer) virtual server status.
+ required: false
+ default: enable
+ aliases: ['action']
+ choices: ['enable', 'disable']
+ server_ports:
+ description:
+ - A list of ports to create for the server. Each list item should be a dictionary which specifies the C(port:)
+ and C(protocol:).
+ required: false
+ default: null
+ operation:
+ description:
+ - Create, Update or Remove SLB server. For create and update operation, we use the IP address and server
+ name specified in the POST message. For delete operation, we use the server name in the request URI.
+ required: false
+ default: create
+ choices: ['create', 'update', 'delete']
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled devices using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
+'''
+
+RETURN = '''
+#
+'''
+
+EXAMPLES = '''
+# Create a new server
+- a10_server:
+ host: a10.mydomain.com
+ username: myadmin
+ password: mypassword
+ server: test
+ server_ip: 1.1.1.100
+ validate_certs: false
+ server_status: enable
+ write_config: yes
+ operation: create
+ server_ports:
+ - port-number: 8080
+ protocol: tcp
+ action: enable
+ - port-number: 8443
+ protocol: TCP
+
+'''
+
+VALID_PORT_FIELDS = ['port-number', 'protocol', 'action']
+
+def validate_ports(module, ports):
+ for item in ports:
+ for key in item:
+ if key not in VALID_PORT_FIELDS:
+ module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))
+
+ # validate the port number is present and an integer
+ if 'port-number' in item:
+ try:
+ item['port-number'] = int(item['port-number'])
+ except:
+ module.fail_json(msg="port-number entries in the port definitions must be integers")
+ else:
+ module.fail_json(msg="port definitions must define the port-number field")
+
+ # validate the port protocol is present, no need to convert to the internal API integer value in v3
+ if 'protocol' in item:
+ protocol = item['protocol']
+ if not protocol:
+ module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_PORT_PROTOCOLS))
+ else:
+ item['protocol'] = protocol
+ else:
+ module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_PORT_PROTOCOLS))
+
+ # 'status' is 'action' in AXAPIv3
+ # no need to convert the status, a.k.a action, to the internal API integer value in v3
+ # action is either enabled or disabled
+ if 'action' in item:
+ action = item['action']
+ if action not in ['enable', 'disable']:
+ module.fail_json(msg="server action must be enable or disable")
+ else:
+ item['action'] = 'enable'
+
+
+def main():
+ argument_spec = a10_argument_spec()
+ argument_spec.update(url_argument_spec())
+ argument_spec.update(
+ dict(
+ operation=dict(type='str', default='create', choices=['create', 'update', 'delete']),
+ server_name=dict(type='str', aliases=['server'], required=True),
+ server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
+ server_status=dict(type='str', default='enable', aliases=['action'], choices=['enable', 'disable']),
+ server_ports=dict(type='list', aliases=['port'], default=[]),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False
+ )
+
+ host = module.params['host']
+ username = module.params['username']
+ password = module.params['password']
+ operation = module.params['operation']
+ write_config = module.params['write_config']
+ slb_server = module.params['server_name']
+ slb_server_ip = module.params['server_ip']
+ slb_server_status = module.params['server_status']
+ slb_server_ports = module.params['server_ports']
+
+ axapi_base_url = 'https://{}/axapi/v3/'.format(host)
+ axapi_auth_url = axapi_base_url + 'auth/'
+ signature = axapi_authenticate_v3(module, axapi_auth_url, username, password)
+
+ # validate the ports data structure
+ validate_ports(module, slb_server_ports)
+
+
+ json_post = {
+ "server-list": [
+ {
+ "name": slb_server,
+ "host": slb_server_ip
+ }
+ ]
+ }
+
+ # add optional module parameters
+ if slb_server_ports:
+ json_post['server-list'][0]['port-list'] = slb_server_ports
+
+ if slb_server_status:
+ json_post['server-list'][0]['action'] = slb_server_status
+
+ slb_server_data = axapi_call_v3(module, axapi_base_url+'slb/server/', method='GET', body='', signature=signature)
+
+ # for empty slb server list
+ if axapi_failure(slb_server_data):
+ slb_server_exists = False
+ else:
+ slb_server_list = [server['name'] for server in slb_server_data['server-list']]
+ if slb_server in slb_server_list:
+ slb_server_exists = True
+ else:
+ slb_server_exists = False
+
+ changed = False
+ if operation == 'create':
+ if not slb_server_exists:
+ result = axapi_call_v3(module, axapi_base_url+'slb/server/', method='POST', body=json.dumps(json_post), signature=signature)
+ if axapi_failure(result):
+ module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg'])
+ changed = True
+ else:
+ module.fail_json(msg="server already exists, use state='update' instead")
+ changed = False
+ # if we changed things, get the full info regarding result
+ if changed:
+ result = axapi_call_v3(module, axapi_base_url + 'slb/server/' + slb_server, method='GET', body='', signature=signature)
+ else:
+ result = slb_server_data
+ elif operation == 'delete':
+ if slb_server_exists:
+ result = axapi_call_v3(module, axapi_base_url + 'slb/server/' + slb_server, method='DELETE', body='', signature=signature)
+ if axapi_failure(result):
+ module.fail_json(msg="failed to delete server: %s" % result['response']['err']['msg'])
+ changed = True
+ else:
+ result = dict(msg="the server was not present")
+ elif operation == 'update':
+ if slb_server_exists:
+ result = axapi_call_v3(module, axapi_base_url + 'slb/server/', method='PUT', body=json.dumps(json_post), signature=signature)
+ if axapi_failure(result):
+ module.fail_json(msg="failed to update server: %s" % result['response']['err']['msg'])
+ changed = True
+ else:
+ result = dict(msg="the server was not present")
+
+ # if the config has changed, save the config unless otherwise requested
+ if changed and write_config:
+ write_result = axapi_call_v3(module, axapi_base_url+'write/memory/', method='POST', body='', signature=signature)
+ if axapi_failure(write_result):
+ module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
+
+ # log out gracefully and exit
+ axapi_call_v3(module, axapi_base_url + 'logoff/', method='POST', body='', signature=signature)
+ module.exit_json(changed=changed, content=result)
+
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import url_argument_spec
+from ansible.module_utils.a10 import axapi_call_v3, a10_argument_spec, axapi_authenticate_v3, axapi_failure
+
+
+if __name__ == '__main__':
+ main()
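
validate_ports() above normalises each port dict in place: port-number must coerce to int, and a missing action defaults to 'enable'. A standalone rendering of the same checks, with a stub fail() standing in for module.fail_json (the protocol check is elided here):

# Sketch of the per-port normalisation done above; fail() stands in
# for module.fail_json.
VALID_PORT_FIELDS = ['port-number', 'protocol', 'action']

def fail(msg):
    raise SystemExit(msg)

def normalise(item):
    for key in item:
        if key not in VALID_PORT_FIELDS:
            fail("invalid port field (%s)" % key)
    try:
        item['port-number'] = int(item['port-number'])
    except (KeyError, ValueError):
        fail("port-number must be present and an integer")
    if item.get('action', 'enable') not in ('enable', 'disable'):
        fail("action must be enable or disable")
    item.setdefault('action', 'enable')
    return item

print(normalise({'port-number': '8080', 'protocol': 'tcp'}))
# {'port-number': 8080, 'protocol': 'tcp', 'action': 'enable'}
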
diff --git a/network/a10/a10_service_group.py b/network/a10/a10_service_group.py
index af664084b6a..486fcb0b3e1 100644
--- a/network/a10/a10_service_group.py
+++ b/network/a10/a10_service_group.py
@@ -3,7 +3,8 @@
"""
Ansible module to manage A10 Networks slb service-group objects
-(c) 2014, Mischa Peters
+(c) 2014, Mischa Peters,
+Eric Chou
This file is part of Ansible
@@ -21,56 +22,45 @@
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: a10_service_group
version_added: 1.8
-short_description: Manage A10 Networks devices' service groups
+short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' service groups.
description:
- - Manage slb service-group objects on A10 Networks devices via aXAPI
-author: "Mischa Peters (@mischapeters)"
+ - Manage SLB (Server Load Balancing) service-group objects on A10 Networks devices via aXAPIv2.
+author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014"
notes:
- - Requires A10 Networks aXAPI 2.1
- - When a server doesn't exist and is added to the service-group the server will be created
+ - Requires A10 Networks aXAPI 2.1.
+ - When a server doesn't exist and is added to the service-group the server will be created.
+extends_documentation_fragment: a10
options:
- host:
+ partition:
+ version_added: "2.3"
description:
- - hostname or ip of your A10 Networks device
- required: true
- default: null
- aliases: []
- choices: []
- username:
- description:
- - admin account of your A10 Networks device
- required: true
- default: null
- aliases: ['user', 'admin']
- choices: []
- password:
- description:
- - admin password of your A10 Networks device
- required: true
+ - Set the active partition on the device.
+ required: false
default: null
- aliases: ['pass', 'pwd']
- choices: []
service_group:
description:
- - slb service-group name
+ - The SLB (Server Load Balancing) service-group name
required: true
default: null
aliases: ['service', 'pool', 'group']
- choices: []
service_group_protocol:
description:
- - slb service-group protocol
+ - The SLB service-group protocol of TCP or UDP.
required: false
default: tcp
aliases: ['proto', 'protocol']
choices: ['tcp', 'udp']
service_group_method:
description:
- - slb service-group loadbalancing method
+ - The SLB service-group load balancing method, such as round-robin or weighted-rr.
required: false
default: round-robin
aliases: ['method']
@@ -82,17 +72,6 @@
specify the C(status:). See the examples below for details.
required: false
default: null
- aliases: []
- choices: []
- write_config:
- description:
- - If C(yes), any changes will cause a write of the running configuration
- to non-volatile memory. This will save I(all) configuration changes,
- including those that may have been made manually or through other modules,
- so care should be taken when specifying C(yes).
- required: false
- default: "no"
- choices: ["yes", "no"]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
@@ -103,12 +82,17 @@
'''
EXAMPLES = '''
# Create a new service-group
- a10_service_group:
host: a10.mydomain.com
username: myadmin
password: mypassword
+ partition: mypartition
service_group: sg-80-tcp
servers:
- server: foo1.mydomain.com
@@ -123,6 +107,14 @@
'''
+RETURN = '''
+content:
+ description: the full info regarding the slb_service_group
+ returned: success
+ type: string
+ sample: "mynewservicegroup"
+'''
+
VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method']
VALID_SERVER_FIELDS = ['server', 'port', 'status']
@@ -174,6 +166,7 @@ def main():
'src-ip-only-hash',
'src-ip-hash']),
servers=dict(type='list', aliases=['server', 'member'], default=[]),
+ partition=dict(type='str', default=None),
)
)
@@ -185,6 +178,7 @@ def main():
host = module.params['host']
username = module.params['username']
password = module.params['password']
+ partition = module.params['partition']
state = module.params['state']
write_config = module.params['write_config']
slb_service_group = module.params['service_group']
@@ -226,7 +220,8 @@ def main():
# first we authenticate to get a session id
session_url = axapi_authenticate(module, axapi_base_url, username, password)
-
+ # then we select the active-partition
+ slb_server_partition = axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
# then we check to see if the specified group exists
slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
slb_service_group_exist = not axapi_failure(slb_result)
@@ -334,8 +329,11 @@ def main():
module.exit_json(changed=changed, content=result)
# standard ansible module imports
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.a10 import *
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import url_argument_spec
+from ansible.module_utils.a10 import axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, axapi_enabled_disabled
+
-main()
+if __name__ == '__main__':
+ main()
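
All three aXAPIv2 modules in this patch gain the same two-step sequence: authenticate, then POST system.partition.active before any slb.* call so subsequent operations land in the selected partition. A sketch of the request order, with http_post standing in for axapi_call and illustrative URL values:

import json

def http_post(url, body):            # stand-in for axapi_call
    print("POST", url, body)
    return {}

session_url = ("https://a10.example.com/services/rest/V2.1/"
               "?format=json&session_id=abc123")
# 1. select the partition first ...
http_post(session_url + "&method=system.partition.active",
          json.dumps({"name": "mypartition"}))
# 2. ... then every slb.* call operates inside it
http_post(session_url + "&method=slb.service_group.search",
          json.dumps({"name": "sg-80-tcp"}))
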
diff --git a/network/a10/a10_virtual_server.py b/network/a10/a10_virtual_server.py
index 1a04f1a1754..212e65203ac 100644
--- a/network/a10/a10_virtual_server.py
+++ b/network/a10/a10_virtual_server.py
@@ -3,7 +3,8 @@
"""
Ansible module to manage A10 Networks slb virtual server objects
-(c) 2014, Mischa Peters
+(c) 2014, Mischa Peters,
+Eric Chou
This file is part of Ansible
@@ -21,56 +22,43 @@
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: a10_virtual_server
version_added: 1.8
-short_description: Manage A10 Networks devices' virtual servers
+short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' virtual servers.
description:
- - Manage slb virtual server objects on A10 Networks devices via aXAPI
-author: "Mischa Peters (@mischapeters)"
+ - Manage SLB (Server Load Balancing) virtual server objects on A10 Networks devices via aXAPIv2.
+author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014"
notes:
- - Requires A10 Networks aXAPI 2.1
-requirements: []
+ - Requires A10 Networks aXAPI 2.1.
+extends_documentation_fragment: a10
options:
- host:
+ partition:
+ version_added: "2.3"
description:
- - hostname or ip of your A10 Networks device
- required: true
- default: null
- aliases: []
- choices: []
- username:
- description:
- - admin account of your A10 Networks device
- required: true
- default: null
- aliases: ['user', 'admin']
- choices: []
- password:
- description:
- - admin password of your A10 Networks device
- required: true
+ - Set the active partition on the device.
+ required: false
default: null
- aliases: ['pass', 'pwd']
- choices: []
virtual_server:
description:
- - slb virtual server name
+ - The SLB (Server Load Balancing) virtual server name.
required: true
default: null
aliases: ['vip', 'virtual']
- choices: []
virtual_server_ip:
description:
- - slb virtual server ip address
+ - The SLB virtual server IPv4 address.
required: false
default: null
aliases: ['ip', 'address']
- choices: []
virtual_server_status:
description:
- - slb virtual server status
+ - The SLB virtual server status, such as enabled or disabled.
required: false
default: enable
aliases: ['status']
@@ -82,15 +70,6 @@
specify the C(service_group:) as well as the C(status:). See the examples
below for details. This parameter is required when C(state) is C(present).
required: false
- write_config:
- description:
- - If C(yes), any changes will cause a write of the running configuration
- to non-volatile memory. This will save I(all) configuration changes,
- including those that may have been made manually or through other modules,
- so care should be taken when specifying C(yes).
- required: false
- default: "no"
- choices: ["yes", "no"]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
@@ -101,12 +80,17 @@
'''
EXAMPLES = '''
# Create a new virtual server
- a10_virtual_server:
host: a10.mydomain.com
username: myadmin
password: mypassword
+ partition: mypartition
virtual_server: vserver1
virtual_server_ip: 1.1.1.1
virtual_server_ports:
@@ -122,6 +106,14 @@
'''
+RETURN = '''
+content:
+ description: the full info regarding the slb_virtual
+ returned: success
+ type: string
+ sample: "mynewvirtualserver"
+'''
+
VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status']
def validate_ports(module, ports):
@@ -170,6 +162,7 @@ def main():
virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
virtual_server_ports=dict(type='list', required=True),
+ partition=dict(type='str', default=None),
)
)
@@ -181,6 +174,7 @@ def main():
host = module.params['host']
username = module.params['username']
password = module.params['password']
+ partition = module.params['partition']
state = module.params['state']
write_config = module.params['write_config']
slb_virtual = module.params['virtual_server']
@@ -196,6 +190,7 @@ def main():
axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
session_url = axapi_authenticate(module, axapi_base_url, username, password)
+ slb_server_partition = axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
slb_virtual_exists = not axapi_failure(slb_virtual_data)
@@ -289,9 +284,11 @@ def needs_update(src_ports, dst_ports):
module.exit_json(changed=changed, content=result)
# standard ansible module imports
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.a10 import *
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import url_argument_spec
+from ansible.module_utils.a10 import axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, axapi_enabled_disabled, axapi_get_vport_protocol
+
+
if __name__ == '__main__':
main()
-
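
Every module touched here also moves the bare main() call under an import guard, so importing the file (for documentation generation or unit tests) no longer executes it. A minimal illustration:

# guard_demo.py -- importable without side effects
def main():
    print("module executed")

if __name__ == '__main__':   # true only when run as a script,
    main()                   # not on "import guard_demo"
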
diff --git a/network/asa/__init__.py b/network/asa/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/network/asa/asa_acl.py b/network/asa/asa_acl.py
new file mode 100644
index 00000000000..366284155f2
--- /dev/null
+++ b/network/asa/asa_acl.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: asa_acl
+version_added: "2.2"
+author: "Patrick Ogenstad (@ogenstad)"
+short_description: Manage access-lists on a Cisco ASA
+description:
+ - This module allows you to work with access-lists on a Cisco ASA device.
+extends_documentation_fragment: asa
+options:
+ lines:
+ description:
+ - The ordered set of commands that should be configured in the
+ section. The commands must be the exact same commands as found
+ in the device running-config. Be sure to note the configuration
+ command syntax as some commands are automatically modified by the
+ device config parser.
+ required: true
+ before:
+ description:
+ - The ordered set of commands to push on to the command stack if
+ a change needs to be made. This allows the playbook designer
+ the opportunity to perform configuration commands prior to pushing
+ any changes without affecting how the set of commands are matched
+ against the system.
+ required: false
+ default: null
+ after:
+ description:
+ - The ordered set of commands to append to the end of the command
+ stack if a change needs to be made. Just like with I(before) this
+ allows the playbook designer to append a set of commands to be
+ executed after the command set.
+ required: false
+ default: null
+ match:
+ description:
+ - Instructs the module on the way to perform the matching of
+ the set of commands against the current device config. If
+ match is set to I(line), commands are matched line by line. If
+ match is set to I(strict), command lines are matched with respect
+ to position. Finally if match is set to I(exact), command lines
+ must be an equal match.
+ required: false
+ default: line
+ choices: ['line', 'strict', 'exact']
+ replace:
+ description:
+ - Instructs the module on the way to perform the configuration
+ on the device. If the replace argument is set to I(line) then
+ the modified lines are pushed to the device in configuration
+ mode. If the replace argument is set to I(block) then the entire
+ command block is pushed to the device in configuration mode if any
+ line is not correct.
+ required: false
+ default: line
+ choices: ['line', 'block']
+ force:
+ description:
+ - The force argument instructs the module to not consider the
+ current device's running-config. When set to true, this will
+ cause the module to push the contents of I(lines) onto the device
+ without first checking if the lines are already configured.
+ required: false
+ default: false
+ choices: ['yes', 'no']
+ config:
+ description:
+ - The module, by default, will connect to the remote device and
+ retrieve the current running-config to use as a base for comparing
+ against the contents of source. There are times when it is not
+ desirable to have the task get the current running-config for
+ every task in a playbook. The I(config) argument allows the
+ implementer to pass in the configuration to use as the base
+ config for comparison.
+ required: false
+ default: null
+"""
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: cisco
+ password: cisco
+ transport: cli
+ authorize: yes
+ auth_pass: cisco
+
+- asa_acl:
+ lines:
+ - access-list ACL-ANSIBLE extended permit tcp any any eq 82
+ - access-list ACL-ANSIBLE extended permit tcp any any eq www
+ - access-list ACL-ANSIBLE extended permit tcp any any eq 97
+ - access-list ACL-ANSIBLE extended permit tcp any any eq 98
+ - access-list ACL-ANSIBLE extended permit tcp any any eq 99
+ before: clear configure access-list ACL-ANSIBLE
+ match: strict
+ replace: block
+ provider: "{{ cli }}"
+
+- asa_acl:
+ lines:
+ - access-list ACL-OUTSIDE extended permit tcp any any eq www
+ - access-list ACL-OUTSIDE extended permit tcp any any eq https
+ context: customer_a
+ provider: "{{ cli }}"
+"""
+
+RETURN = """
+updates:
+ description: The set of commands that will be pushed to the remote device
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+responses:
+ description: The set of responses from issuing the commands on the device
+ returned: when not check_mode
+ type: list
+ sample: ['...', '...']
+"""
+import ansible.module_utils.asa
+
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.netcfg import NetworkConfig, dumps
+
+
+def get_config(module, acl_name):
+ contents = module.params['config']
+ if not contents:
+ contents = module.config.get_config()
+
+ filtered_config = list()
+ for item in contents.split('\n'):
+ if item.startswith('access-list %s ' % acl_name):
+ filtered_config.append(item)
+
+ return NetworkConfig(indent=1, contents='\n'.join(filtered_config))
+
+def parse_acl_name(module):
+ first_line = True
+ for line in module.params['lines']:
+ ace = line.split()
+ if ace[0] != 'access-list':
+ module.fail_json(msg='All lines/commands must begin with "access-list"; %s is not permitted' % ace[0])
+ if len(ace) <= 1:
+ module.fail_json(msg='All lines/commands must contain the name of the access-list')
+ if first_line:
+ acl_name = ace[1]
+ else:
+ if acl_name != ace[1]:
+ module.fail_json(msg='All lines/commands must use the same access-list; %s is not %s' % (ace[1], acl_name))
+ first_line = False
+
+ return acl_name
+
+def main():
+
+ argument_spec = dict(
+ lines=dict(aliases=['commands'], required=True, type='list'),
+ before=dict(type='list'),
+ after=dict(type='list'),
+ match=dict(default='line', choices=['line', 'strict', 'exact']),
+ replace=dict(default='line', choices=['line', 'block']),
+ force=dict(default=False, type='bool'),
+ config=dict()
+ )
+
+ module = NetworkModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ lines = module.params['lines']
+
+ before = module.params['before']
+ after = module.params['after']
+
+ match = module.params['match']
+ replace = module.params['replace']
+
+ result = dict(changed=False)
+
+ candidate = NetworkConfig(indent=1)
+ candidate.add(lines)
+
+ acl_name = parse_acl_name(module)
+
+ if not module.params['force']:
+ config = get_config(module, acl_name)
+
+ commands = candidate.difference(config)
+ commands = dumps(commands, 'commands').split('\n')
+ commands = [str(c) for c in commands if c]
+ else:
+ commands = str(candidate).split('\n')
+
+ if commands:
+ if not module.check_mode:
+ response = module.config(commands)
+ result['responses'] = response
+ result['changed'] = True
+
+ result['updates'] = commands
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
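
The heart of asa_acl is candidate.difference(config): only ACL lines missing from the filtered running-config are pushed. A dependency-free sketch of that line-wise difference under match=line (NetworkConfig additionally handles hierarchy and the strict/exact modes):

running = [
    "access-list ACL-ANSIBLE extended permit tcp any any eq www",
]
candidate = [
    "access-list ACL-ANSIBLE extended permit tcp any any eq www",
    "access-list ACL-ANSIBLE extended permit tcp any any eq 82",
]
# match=line: keep candidate lines not already present verbatim
updates = [line for line in candidate if line not in running]
print(updates)   # only the "eq 82" entry would be pushed
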
diff --git a/network/asa/asa_command.py b/network/asa/asa_command.py
new file mode 100644
index 00000000000..3bffcca0425
--- /dev/null
+++ b/network/asa/asa_command.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: asa_command
+version_added: "2.2"
+author: "Peter Sprygada (@privateip), Patrick Ogenstad (@ogenstad)"
+short_description: Run arbitrary commands on Cisco ASA devices.
+description:
+ - Sends arbitrary commands to an ASA node and returns the results
+ read from the device. The M(asa_command) module includes an
+ argument that will cause the module to wait for a specific condition
+ before returning or timing out if the condition is not met.
+extends_documentation_fragment: asa
+options:
+ commands:
+ description:
+ - List of commands to send to the remote device over the
+ configured provider. The resulting output from the command
+ is returned. If the I(wait_for) argument is provided, the
+ module does not return until the condition is satisfied or
+ the number of retries has expired.
+ required: true
+ wait_for:
+ description:
+ - List of conditions to evaluate against the output of the
+ command. The task will wait for each condition to be true
+ before moving forward. If the conditional is not true
+ within the configured number of retries, the task fails.
+ See examples.
+ required: false
+ default: null
+ aliases: ['waitfor']
+ match:
+ description:
+ - The I(match) argument is used in conjunction with the
+ I(wait_for) argument to specify the match policy. Valid
+ values are C(all) or C(any). If the value is set to C(all)
+ then all conditionals in the wait_for must be satisfied. If
+ the value is set to C(any) then only one of the values must be
+ satisfied.
+ required: false
+ default: all
+ choices: ['any', 'all']
+ retries:
+ description:
+ - Specifies the number of retries a command should be tried
+ before it is considered failed. The command is run on the
+ target device every retry and evaluated against the
+ I(wait_for) conditions.
+ required: false
+ default: 10
+ interval:
+ description:
+ - Configures the interval in seconds to wait between retries
+ of the command. If the command does not pass the specified
+ conditions, the interval indicates how long to wait before
+ trying the command again.
+ required: false
+ default: 1
+"""
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: cisco
+ password: cisco
+ authorize: yes
+ auth_pass: cisco
+ transport: cli
+
+
+- asa_command:
+ commands:
+ - show version
+ provider: "{{ cli }}"
+
+- asa_command:
+ commands:
+ - show asp drop
+ - show memory
+ provider: "{{ cli }}"
+
+- asa_command:
+ commands:
+ - show version
+ provider: "{{ cli }}"
+ context: system
+"""
+
+RETURN = """
+stdout:
+ description: the set of responses from the commands
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+stdout_lines:
+ description: The value of stdout split into a list
+ returned: always
+ type: list
+ sample: [['...', '...'], ['...'], ['...']]
+
+failed_conditions:
+ description: the conditionals that failed
+ returned: failed
+ type: list
+ sample: ['...', '...']
+"""
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcli import CommandRunner
+from ansible.module_utils.netcli import AddCommandError, FailedConditionsError
+from ansible.module_utils.asa import NetworkModule, NetworkError
+
+VALID_KEYS = ['command', 'prompt', 'response']
+
+def to_lines(stdout):
+ for item in stdout:
+ if isinstance(item, basestring):
+ item = str(item).split('\n')
+ yield item
+
+def parse_commands(module):
+ for cmd in module.params['commands']:
+ if isinstance(cmd, basestring):
+ cmd = dict(command=cmd, output=None)
+ elif 'command' not in cmd:
+ module.fail_json(msg='command keyword argument is required')
+ elif not set(cmd.keys()).issubset(VALID_KEYS):
+ module.fail_json(msg='unknown keyword specified')
+ yield cmd
+
+def main():
+ spec = dict(
+ # { command: , prompt: , response: }
+ commands=dict(type='list', required=True),
+
+ wait_for=dict(type='list', aliases=['waitfor']),
+ match=dict(default='all', choices=['all', 'any']),
+
+ retries=dict(default=10, type='int'),
+ interval=dict(default=1, type='int')
+ )
+
+ module = NetworkModule(argument_spec=spec,
+ connect_on_load=False,
+ supports_check_mode=True)
+
+ commands = list(parse_commands(module))
+ conditionals = module.params['wait_for'] or list()
+
+ warnings = list()
+
+ runner = CommandRunner(module)
+
+ for cmd in commands:
+ if module.check_mode and not cmd['command'].startswith('show'):
+ warnings.append('only show commands are supported when using '
+ 'check mode, not executing `%s`' % cmd['command'])
+ else:
+ if cmd['command'].startswith('conf'):
+ module.fail_json(msg='asa_command does not support running '
+ 'config mode commands. Please use '
+ 'asa_config instead')
+ try:
+ runner.add_command(**cmd)
+ except AddCommandError:
+ exc = get_exception()
+ warnings.append('duplicate command detected: %s' % cmd)
+
+ for item in conditionals:
+ runner.add_conditional(item)
+
+ runner.retries = module.params['retries']
+ runner.interval = module.params['interval']
+ runner.match = module.params['match']
+
+ try:
+ runner.run()
+ except FailedConditionsError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc))
+
+ result = dict(changed=False, stdout=list())
+
+ for cmd in commands:
+ try:
+ output = runner.get_command(cmd['command'])
+ except ValueError:
+ output = 'command not executed due to check_mode, see warnings'
+ result['stdout'].append(output)
+
+ result['warnings'] = warnings
+ result['stdout_lines'] = list(to_lines(result['stdout']))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
+
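
The retries/interval/wait_for trio reduces to: run the commands, evaluate the conditionals, sleep and repeat until they pass or the retries are spent. A toy rendering of that loop; run_commands() and the conditional are illustrative stand-ins, not the module's real runner:

import time

def run_commands():                      # illustrative stand-in
    return ["Cisco Adaptive Security Appliance Software Version 9.8"]

def conditional_ok(stdout):
    # e.g. wait_for: result[0] contains "Version 9.8"
    return "Version 9.8" in stdout[0]

retries, interval = 10, 1
for attempt in range(retries):
    stdout = run_commands()
    if conditional_ok(stdout):
        break
    time.sleep(interval)
else:
    raise SystemExit("failed conditions after %d retries" % retries)
print(stdout)
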
diff --git a/network/asa/asa_config.py b/network/asa/asa_config.py
new file mode 100644
index 00000000000..ffd082684ec
--- /dev/null
+++ b/network/asa/asa_config.py
@@ -0,0 +1,346 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: asa_config
+version_added: "2.2"
+author: "Peter Sprygada (@privateip), Patrick Ogenstad (@ogenstad)"
+short_description: Manage Cisco ASA configuration sections
+description:
+ - Cisco ASA configurations use a simple block indent file syntax
+ for segmenting configuration into sections. This module provides
+ an implementation for working with ASA configuration sections in
+ a deterministic way.
+extends_documentation_fragment: asa
+options:
+ lines:
+ description:
+ - The ordered set of commands that should be configured in the
+ section. The commands must be the exact same commands as found
+ in the device running-config. Be sure to note the configuration
+ command syntax as some commands are automatically modified by the
+ device config parser.
+ required: false
+ default: null
+ aliases: ['commands']
+ parents:
+ description:
+ - The ordered set of parents that uniquely identify the section
+ the commands should be checked against. If the parents argument
+ is omitted, the commands are checked against the set of top
+ level or global commands.
+ required: false
+ default: null
+ src:
+ description:
+ - Specifies the source path to the file that contains the configuration
+ or configuration template to load. The path to the source file can
+ either be the full path on the Ansible control host or a relative
+ path from the playbook or role root directory. This argument is mutually
+ exclusive with I(lines).
+ required: false
+ default: null
+ before:
+ description:
+ - The ordered set of commands to push on to the command stack if
+ a change needs to be made. This allows the playbook designer
+ the opportunity to perform configuration commands prior to pushing
+ any changes without affecting how the set of commands are matched
+ against the system
+ required: false
+ default: null
+ after:
+ description:
+ - The ordered set of commands to append to the end of the command
+ stack if a change needs to be made. Just like with I(before) this
+ allows the playbook designer to append a set of commands to be
+ executed after the command set.
+ required: false
+ default: null
+ match:
+ description:
+ - Instructs the module on the way to perform the matching of
+ the set of commands against the current device config. If
+ match is set to I(line), commands are matched line by line. If
+ match is set to I(strict), command lines are matched with respect
+ to position. If match is set to I(exact), command lines
+ must be an equal match. Finally, if match is set to I(none), the
+ module will not attempt to compare the source configuration with
+ the running configuration on the remote device.
+ required: false
+ default: line
+ choices: ['line', 'strict', 'exact', 'none']
+ replace:
+ description:
+ - Instructs the module on the way to perform the configuration
+ on the device. If the replace argument is set to I(line) then
+ the modified lines are pushed to the device in configuration
+ mode. If the replace argument is set to I(block) then the entire
+ command block is pushed to the device in configuration mode if any
+ line is not correct
+ required: false
+ default: line
+ choices: ['line', 'block']
+ update:
+ description:
+ - The I(update) argument controls how the configuration statements
+ are processed on the remote device. Valid choices for the I(update)
+ argument are I(merge) and I(check). When the argument is set to
+ I(merge), the configuration changes are merged with the current
+ device running configuration. When the argument is set to I(check)
+ the configuration updates are determined but not actually configured
+ on the remote device.
+ required: false
+ default: merge
+ choices: ['merge', 'check']
+ commit:
+ description:
+ - This argument specifies the update method to use when applying the
+ configuration changes to the remote node. If the value is set to
+ I(merge) the configuration updates are merged with the
+ running-config. If the value is set to I(check), no changes are made to
+ the remote host.
+ required: false
+ default: merge
+ choices: ['merge', 'check']
+ backup:
+ description:
+ - This argument will cause the module to create a full backup of
+ the current C(running-config) from the remote device before any
+ changes are made. The backup file is written to the C(backup)
+ folder in the playbook root directory. If the directory does not
+ exist, it is created.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+ config:
+ description:
+ - The C(config) argument allows the playbook designer to supply
+ the base configuration to be used to validate configuration
+ changes necessary. If this argument is provided, the module
+ will not download the running-config from the remote node.
+ required: false
+ default: null
+ defaults:
+ description:
+ - This argument specifies whether or not to collect all defaults
+ when getting the remote device running config. When enabled,
+ the module will get the current config by issuing the command
+ C(show running-config all).
+ required: false
+ default: no
+ choices: ['yes', 'no']
+ passwords:
+ description:
+ - This argument specifies to include passwords in the config
+ when retrieving the running-config from the remote device. This
+ includes passwords related to VPN endpoints. This argument is
+ mutually exclusive with I(defaults).
+ required: false
+ default: no
+ choices: ['yes', 'no']
+ save:
+ description:
+ - The C(save) argument instructs the module to save the
+ running-config to the startup-config at the conclusion of the module
+ running. If check mode is specified, this argument is ignored.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+"""
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: cisco
+ password: cisco
+ authorize: yes
+ auth_pass: cisco
+ transport: cli
+
+- asa_config:
+ lines:
+ - network-object host 10.80.30.18
+ - network-object host 10.80.30.19
+ - network-object host 10.80.30.20
+ parents: ['object-group network OG-MONITORED-SERVERS']
+ provider: "{{ cli }}"
+
+- asa_config:
+ host: "{{ inventory_hostname }}"
+ lines:
+ - message-length maximum client auto
+ - message-length maximum 512
+ match: line
+ parents: ['policy-map type inspect dns PM-DNS', 'parameters']
+ authorize: yes
+ auth_pass: cisco
+ username: admin
+ password: cisco
+ context: ansible
+
+- asa_config:
+ lines:
+ - ikev1 pre-shared-key MyS3cretVPNK3y
+ parents: tunnel-group 1.1.1.1 ipsec-attributes
+ passwords: yes
+ provider: "{{ cli }}"
+
+"""
+
+RETURN = """
+updates:
+ description: The set of commands that will be pushed to the remote device
+ returned: always
+ type: list
+ sample: ['...', '...']
+backup_path:
+ description: The full path to the backup file
+ returned: when backup is yes
+ type: path
+ sample: /playbooks/ansible/backup/asa_config.2016-07-16@22:28:34
+responses:
+ description: The set of responses from issuing the commands on the device
+ returned: when not check_mode
+ type: list
+ sample: ['...', '...']
+"""
+import re
+
+import ansible.module_utils.asa
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.network import NetworkModule, NetworkError
+from ansible.module_utils.netcfg import NetworkConfig, dumps
+
+def get_config(module):
+ contents = module.params['config']
+ if not contents:
+ if module.params['defaults']:
+ include = 'defaults'
+ elif module.params['passwords']:
+ include = 'passwords'
+ else:
+ include = None
+ contents = module.config.get_config(include=include)
+ return NetworkConfig(indent=1, contents=contents)
+
+def get_candidate(module):
+ candidate = NetworkConfig(indent=1)
+ if module.params['src']:
+ candidate.load(module.params['src'])
+ elif module.params['lines']:
+ parents = module.params['parents'] or list()
+ candidate.add(module.params['lines'], parents=parents)
+ return candidate
+
+def run(module, result):
+ match = module.params['match']
+ replace = module.params['replace']
+ path = module.params['parents']
+
+ candidate = get_candidate(module)
+
+ if match != 'none':
+ config = get_config(module)
+ configobjs = candidate.difference(config, path=path, match=match,
+ replace=replace)
+ else:
+ configobjs = candidate.items
+
+ if configobjs:
+ commands = dumps(configobjs, 'commands').split('\n')
+
+ if module.params['lines']:
+ if module.params['before']:
+ commands[:0] = module.params['before']
+
+ if module.params['after']:
+ commands.extend(module.params['after'])
+
+ result['updates'] = commands
+
+ # send the configuration commands to the device and merge
+ # them with the current running config
+ if not module.check_mode:
+ module.config.load_config(commands)
+ result['changed'] = True
+
+ if module.params['save']:
+ if not module.check_mode:
+ module.config.save_config()
+ result['changed'] = True
+
+def main():
+ """ main entry point for module execution
+ """
+ argument_spec = dict(
+ src=dict(type='path'),
+
+ lines=dict(aliases=['commands'], type='list'),
+ parents=dict(type='list'),
+
+ before=dict(type='list'),
+ after=dict(type='list'),
+
+ match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
+ replace=dict(default='line', choices=['line', 'block']),
+
+ config=dict(),
+ defaults=dict(type='bool', default=False),
+ passwords=dict(type='bool', default=False),
+
+ backup=dict(type='bool', default=False),
+ save=dict(type='bool', default=False),
+ )
+
+ mutually_exclusive = [('lines', 'src'), ('defaults', 'passwords')]
+
+ required_if = [('match', 'strict', ['lines']),
+ ('match', 'exact', ['lines']),
+ ('replace', 'block', ['lines'])]
+
+ module = NetworkModule(argument_spec=argument_spec,
+ connect_on_load=False,
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ supports_check_mode=True)
+
+ result = dict(changed=False)
+
+ if module.params['backup']:
+ result['__backup__'] = module.config.get_config()
+
+ try:
+ run(module, result)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), **exc.kwargs)
+
+ module.exit_json(**result)
+
+if __name__ == '__main__':
+ main()
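
run() above short-circuits the comparison entirely when match=none: the candidate is pushed as-is without fetching the running-config. A sketch of that branch, with plain lists standing in for NetworkConfig objects:

def build_updates(candidate, running, match):
    if match != 'none':
        # compare against the device config (match=line semantics)
        return [line for line in candidate if line not in running]
    # match=none: trust the caller, push everything
    return list(candidate)

running = ["hostname asa01"]
candidate = ["hostname asa01", "domain-name example.com"]
print(build_updates(candidate, running, 'line'))   # ['domain-name example.com']
print(build_updates(candidate, running, 'none'))   # both lines
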
diff --git a/network/citrix/netscaler.py b/network/citrix/netscaler.py
index 384a625bdca..30442ade78c 100644
--- a/network/citrix/netscaler.py
+++ b/network/citrix/netscaler.py
@@ -21,6 +21,10 @@
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: netscaler
@@ -87,13 +91,26 @@
EXAMPLES = '''
# Disable the server
-ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass"
+- netscaler:
+ nsc_host: nsc.example.com
+ user: apiuser
+ password: apipass
# Enable the server
-ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass action=enable"
+- netscaler:
+ nsc_host: nsc.example.com
+ user: apiuser
+ password: apipass
+ action: enable
# Disable the service local:8080
-ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass name=local:8080 type=service action=disable"
+- netscaler:
+ nsc_host: nsc.example.com
+ user: apiuser
+ password: apipass
+ name: 'local:8080'
+ type: service
+ action: disable
'''
@@ -173,7 +190,8 @@ def main():
rc = 0
try:
rc, result = core(module)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg=str(e))
if rc != 0:
@@ -186,4 +204,7 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
-main()
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
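
The netscaler change replaces the Python-2-only 'except Exception, e' syntax with the version-neutral get_exception() helper. A sketch of the same pattern without the Ansible import, using sys.exc_info(), which is what such helpers wrap:

import sys

def get_exception():                 # minimal stand-in for the helper
    return sys.exc_info()[1]

try:
    1 / 0
except Exception:                    # valid syntax on Python 2 and 3
    e = get_exception()
    print("failed: %s" % e)
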
diff --git a/network/cloudflare_dns.py b/network/cloudflare_dns.py
new file mode 100644
index 00000000000..621e92ac1f0
--- /dev/null
+++ b/network/cloudflare_dns.py
@@ -0,0 +1,672 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016 Michael Gruener
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cloudflare_dns
+author: "Michael Gruener (@mgruener)"
+requirements:
+ - "python >= 2.6"
+version_added: "2.1"
+short_description: manage Cloudflare DNS records
+description:
+ - "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)"
+options:
+ account_api_token:
+ description:
+ - "Account API token. You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://www.cloudflare.com/a/account)"
+ required: true
+ account_email:
+ description:
+ - "Account email."
+ required: true
+ port:
+ description: Service port. Required for C(type=SRV)
+ required: false
+ default: null
+ priority:
+ description: Record priority. Required for C(type=MX) and C(type=SRV)
+ required: false
+ default: "1"
+ proto:
+ description: Service protocol. Required for C(type=SRV)
+ required: false
+ choices: [ 'tcp', 'udp' ]
+ default: null
+ proxied:
+ description: Proxy through cloudflare network or just use DNS
+ required: false
+ default: no
+ version_added: "2.3"
+ record:
+ description:
+ - Record to add. Required if C(state=present). Default is C(@) (i.e. the zone name)
+ required: false
+ default: "@"
+ aliases: [ "name" ]
+ service:
+ description: Record service. Required for C(type=SRV)
+ required: false
+ default: null
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name. Only use with C(state=present)
+ - This will delete all other records with the same record name and type.
+ required: false
+ default: null
+ state:
+ description:
+ - Whether the record(s) should exist or not
+ required: false
+ choices: [ 'present', 'absent' ]
+ default: present
+ timeout:
+ description:
+ - Timeout for Cloudflare API calls
+ required: false
+ default: 30
+ ttl:
+ description:
+ - The TTL to give the new record. Must be between 120 and 2,147,483,647 seconds, or 1 for automatic.
+ required: false
+ default: 1 (automatic)
+ type:
+ description:
+ - The type of DNS record to create. Required if C(state=present)
+ required: false
+ choices: [ 'A', 'AAAA', 'CNAME', 'TXT', 'SRV', 'MX', 'NS', 'SPF' ]
+ default: null
+ value:
+ description:
+ - The record value. Required for C(state=present)
+ required: false
+ default: null
+ aliases: [ "content" ]
+ weight:
+ description: Service weight. Required for C(type=SRV)
+ required: false
+ default: "1"
+ zone:
+ description:
+ - The name of the Zone to work with (e.g. "example.com"). The Zone must already exist.
+ required: true
+ aliases: ["domain"]
+'''
+
+EXAMPLES = '''
+# create a test.my.com A record to point to 127.0.0.1
+- cloudflare_dns:
+ zone: my.com
+ record: test
+ type: A
+ value: 127.0.0.1
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+ register: record
+
+# create a my.com CNAME record to example.com
+- cloudflare_dns:
+ zone: my.com
+ type: CNAME
+ value: example.com
+ state: present
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+
+# change its TTL
+- cloudflare_dns:
+ zone: my.com
+ type: CNAME
+ value: example.com
+ ttl: 600
+ state: present
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+
+# and delete the record
+- cloudflare_dns:
+ zone: my.com
+ type: CNAME
+ value: example.com
+ state: absent
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+
+# create a my.com CNAME record to example.com and proxy through cloudflare's network
+- cloudflare_dns:
+ zone: my.com
+ type: CNAME
+ value: example.com
+ state: present
+ proxied: yes
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+
+# create TXT record "test.my.com" with value "unique value"
+# delete all other TXT records named "test.my.com"
+- cloudflare_dns:
+ domain: my.com
+ record: test
+ type: TXT
+ value: unique value
+ state: present
+ solo: true
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+
+# create a SRV record _foo._tcp.my.com
+- cloudflare_dns:
+ domain: my.com
+ service: foo
+ proto: tcp
+ port: 3500
+ priority: 10
+ weight: 20
+ type: SRV
+ value: fooserver.my.com
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+'''
+
+RETURN = '''
+record:
+ description: dictionary containing the record data
+ returned: success, except on record deletion
+ type: dictionary
+ contains:
+ content:
+ description: the record content (details depend on record type)
+ returned: success
+ type: string
+ sample: 192.0.2.91
+ created_on:
+ description: the record creation date
+ returned: success
+ type: string
+ sample: 2016-03-25T19:09:42.516553Z
+ data:
+ description: additional record data
+ returned: success, if type is SRV
+ type: dictionary
+ sample: {
+ name: "jabber",
+ port: 8080,
+ priority: 10,
+ proto: "_tcp",
+ service: "_xmpp",
+ target: "jabberhost.sample.com",
+ weight: 5,
+ }
+ id:
+ description: the record id
+ returned: success
+ type: string
+ sample: f9efb0549e96abcb750de63b38c9576e
+ locked:
+ description: No documentation available
+ returned: success
+ type: boolean
+ sample: False
+ meta:
+ description: No documentation available
+ returned: success
+ type: dictionary
+ sample: { auto_added: false }
+ modified_on:
+ description: record modification date
+ returned: success
+ type: string
+ sample: 2016-03-25T19:09:42.516553Z
+ name:
+ description: the record name as FQDN (including _service and _proto for SRV)
+ returned: success
+ type: string
+ sample: www.sample.com
+ priority:
+ description: priority of the MX record
+ returned: success, if type is MX
+ type: int
+ sample: 10
+ proxiable:
+ description: whether this record can be proxied through cloudflare
+ returned: success
+ type: boolean
+ sample: False
+ proxied:
+ description: whether the record is proxied through cloudflare
+ returned: success
+ type: boolean
+ sample: False
+ ttl:
+ description: the time-to-live for the record
+ returned: success
+ type: int
+ sample: 300
+ type:
+ description: the record type
+ returned: success
+ type: string
+ sample: A
+ zone_id:
+ description: the id of the zone containing the record
+ returned: success
+ type: string
+ sample: abcede0bf9f0066f94029d2e6b73856a
+ zone_name:
+ description: the name of the zone containing the record
+ returned: success
+ type: string
+ sample: sample.com
+'''
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+import urllib
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import fetch_url
+
+
+class CloudflareAPI(object):
+
+ cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
+ changed = False
+
+ def __init__(self, module):
+ self.module = module
+ self.account_api_token = module.params['account_api_token']
+ self.account_email = module.params['account_email']
+ self.port = module.params['port']
+ self.priority = module.params['priority']
+ self.proto = module.params['proto']
+ self.proxied = module.params['proxied']
+ self.record = module.params['record']
+ self.service = module.params['service']
+ self.is_solo = module.params['solo']
+ self.state = module.params['state']
+ self.timeout = module.params['timeout']
+ self.ttl = module.params['ttl']
+ self.type = module.params['type']
+ self.value = module.params['value']
+ self.weight = module.params['weight']
+ self.zone = module.params['zone']
+
+ if self.record == '@':
+ self.record = self.zone
+
+ if (self.type in ['CNAME','NS','MX','SRV']) and (self.value is not None):
+ self.value = self.value.rstrip('.')
+
+ if (self.type == 'SRV'):
+ if (self.proto is not None) and (not self.proto.startswith('_')):
+ self.proto = '_' + self.proto
+ if (self.service is not None) and (not self.service.startswith('_')):
+ self.service = '_' + self.service
+
+ if not self.record.endswith(self.zone):
+ self.record = self.record + '.' + self.zone
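+ # Illustrative examples of the normalization above (assumption based on
+ # this code): with zone=example.com, record '@' becomes 'example.com',
+ # 'test' becomes 'test.example.com', and an SRV proto 'tcp' becomes '_tcp'.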
+
+ def _cf_simple_api_call(self,api_call,method='GET',payload=None):
+ headers = { 'X-Auth-Email': self.account_email,
+ 'X-Auth-Key': self.account_api_token,
+ 'Content-Type': 'application/json' }
+ data = None
+ if payload:
+ try:
+ data = json.dumps(payload)
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg="Failed to encode payload as JSON: %s " % str(e))
+
+ resp, info = fetch_url(self.module,
+ self.cf_api_endpoint + api_call,
+ headers=headers,
+ data=data,
+ method=method,
+ timeout=self.timeout)
+
+ if info['status'] not in [200,304,400,401,403,429,405,415]:
+ self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}".format(api_call,info['status']))
+
+ error_msg = ''
+ if info['status'] == 401:
+ # Unauthorized
+ error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call)
+ elif info['status'] == 403:
+ # Forbidden
+ error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call)
+ elif info['status'] == 429:
+ # Too many requests
+ error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call)
+ elif info['status'] == 405:
+ # Method not allowed
+ error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call)
+ elif info['status'] == 415:
+ # Unsupported Media Type
+ error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call)
+ elif info['status'] == 400:
+ # Bad Request
+ error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call)
+
+ result = None
+ content = None
+ try:
+ content = resp.read()
+ except AttributeError:
+ if info['body']:
+ content = info['body']
+ else:
+ error_msg += "; The API response was empty"
+
+ if content:
+ try:
+ result = json.loads(content)
+ # json.JSONDecodeError only exists on Python 3.5+; json.loads raises
+ # ValueError (its parent class) on all supported Python versions
+ except ValueError:
+ error_msg += "; Failed to parse API response: {0}".format(content)
+
+ # received an error status but no data with details on what failed
+ if (info['status'] not in [200,304]) and (result is None):
+ self.module.fail_json(msg=error_msg)
+
+ if not result['success']:
+ error_msg += "; Error details: "
+ for error in result['errors']:
+ error_msg += "code: {0}, error: {1}; ".format(error['code'],error['message'])
+ if 'error_chain' in error:
+ for chain_error in error['error_chain']:
+ error_msg += "code: {0}, error: {1}; ".format(chain_error['code'],chain_error['message'])
+ self.module.fail_json(msg=error_msg)
+
+ return result, info['status']
+
+ def _cf_api_call(self,api_call,method='GET',payload=None):
+ result, status = self._cf_simple_api_call(api_call,method,payload)
+
+ data = result['result']
+
+ if 'result_info' in result:
+ pagination = result['result_info']
+ if pagination['total_pages'] > 1:
+ next_page = int(pagination['page']) + 1
+ parameters = ['page={0}'.format(next_page)]
+ # strip "page" parameter from call parameters (if there are any)
+ if '?' in api_call:
+ raw_api_call,query = api_call.split('?',1)
+ parameters += [param for param in query.split('&') if not param.startswith('page')]
+ else:
+ raw_api_call = api_call
+ while next_page <= pagination['total_pages']:
+ # rebuild the query string for each page; appending to the call
+ # string would corrupt requests from page 3 onwards
+ parameters[0] = 'page={0}'.format(next_page)
+ page_call = raw_api_call + '?' + '&'.join(parameters)
+ result, status = self._cf_simple_api_call(page_call,method,payload)
+ data += result['result']
+ next_page += 1
+
+ return data, status
+
+ def _get_zone_id(self,zone=None):
+ if not zone:
+ zone = self.zone
+
+ zones = self.get_zones(zone)
+ if len(zones) > 1:
+ self.module.fail_json(msg="More than one zone matches {0}".format(zone))
+
+ if len(zones) < 1:
+ self.module.fail_json(msg="No zone found with name {0}".format(zone))
+
+ return zones[0]['id']
+
+ def get_zones(self,name=None):
+ if not name:
+ name = self.zone
+ param = ''
+ if name:
+ param = '?' + urllib.urlencode({'name' : name})
+ zones,status = self._cf_api_call('/zones' + param)
+ return zones
+
+ def get_dns_records(self,zone_name=None,type=None,record=None,value=''):
+ if not zone_name:
+ zone_name = self.zone
+ if not type:
+ type = self.type
+ if not record:
+ record = self.record
+ # necessary because None as value means to override user
+ # set module value
+ if (not value) and (value is not None):
+ value = self.value
+
+ zone_id = self._get_zone_id()
+ api_call = '/zones/{0}/dns_records'.format(zone_id)
+ query = {}
+ if type:
+ query['type'] = type
+ if record:
+ query['name'] = record
+ if value:
+ query['content'] = value
+ if query:
+ api_call += '?' + urllib.urlencode(query)
+
+ records,status = self._cf_api_call(api_call)
+ return records
+
+ def delete_dns_records(self,**kwargs):
+ params = {}
+ for param in ['port','proto','service','solo','type','record','value','weight','zone']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self,param)
+
+ records = []
+ content = params['value']
+ search_record = params['record']
+ if params['type'] == 'SRV':
+ content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
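+ # e.g. (illustrative) weight=20, port=3500, value=fooserver.my.com is
+ # matched against the Cloudflare content string "20\t3500\tfooserver.my.com"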
+ if params['solo']:
+ search_value = None
+ else:
+ search_value = content
+
+ records = self.get_dns_records(params['zone'],params['type'],search_record,search_value)
+
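+ # with solo=true, every record of this name and type except the exact
+ # target record is deleted; otherwise only exact matches are deleted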
+ for rr in records:
+ if params['solo']:
+ if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)):
+ self.changed = True
+ if not self.module.check_mode:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'],rr['id']),'DELETE')
+ else:
+ self.changed = True
+ if not self.module.check_mode:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'],rr['id']),'DELETE')
+ return self.changed
+
+ def ensure_dns_record(self,**kwargs):
+ params = {}
+ for param in ['port','priority','proto','proxied','service','ttl','type','record','value','weight','zone']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self,param)
+
+ search_value = params['value']
+ search_record = params['record']
+ new_record = None
+ if (params['type'] is None) or (params['record'] is None):
+ self.module.fail_json(msg="You must provide a type and a record to create a new record")
+
+ if (params['type'] in [ 'A','AAAA','CNAME','TXT','MX','NS','SPF']):
+ if not params['value']:
+ self.module.fail_json(msg="You must provide a non-empty value to create this record type")
+
+ # there can only be one CNAME per record
+ # ignoring the value when searching for existing
+ # CNAME records allows us to update the value if it
+ # changes
+ if params['type'] == 'CNAME':
+ search_value = None
+
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "ttl": params['ttl']
+ }
+
+ if (params['type'] in [ 'A', 'AAAA', 'CNAME' ]):
+ new_record["proxied"] = params["proxied"]
+
+ if params['type'] == 'MX':
+ for attr in [params['priority'],params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide priority and a value to create this record type")
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "priority": params['priority'],
+ "ttl": params['ttl']
+ }
+
+ if params['type'] == 'SRV':
+ for attr in [params['port'],params['priority'],params['proto'],params['service'],params['weight'],params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
+ srv_data = {
+ "target": params['value'],
+ "port": params['port'],
+ "weight": params['weight'],
+ "priority": params['priority'],
+ "name": params['record'][:-len('.' + params['zone'])],
+ "proto": params['proto'],
+ "service": params['service']
+ }
+ new_record = { "type": params['type'], "ttl": params['ttl'], 'data': srv_data }
+ search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
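+ # e.g. (illustrative) service=_foo, proto=_tcp, record=example.com
+ # yields the search name '_foo._tcp.example.com'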
+
+ zone_id = self._get_zone_id(params['zone'])
+ records = self.get_dns_records(params['zone'],params['type'],search_record,search_value)
+ # in theory this should be impossible as cloudflare does not allow
+ # the creation of duplicate records but lets cover it anyways
+ if len(records) > 1:
+ self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
+ # record already exists, check if it must be updated
+ if len(records) == 1:
+ cur_record = records[0]
+ do_update = False
+ if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl'] ):
+ do_update = True
+ if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
+ do_update = True
+ if ('data' in new_record) and ('data' in cur_record):
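+ # (a > b) - (a < b) emulates Python 2's cmp(); any non-zero result
+ # means the existing SRV data differs from the requested data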
+ if (cur_record['data'] > new_record['data']) - (cur_record['data'] < new_record['data']):
+ do_update = True
+ if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
+ do_update = True
+ if do_update:
+ # in check mode the API call is skipped; report the existing record
+ result = records
+ if not self.module.check_mode:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id,records[0]['id']),'PUT',new_record)
+ self.changed = True
+ return result,self.changed
+ else:
+ return records,self.changed
+ # record does not exist yet; in check mode nothing is created
+ result = None
+ if not self.module.check_mode:
+ result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id),'POST',new_record)
+ self.changed = True
+ return result,self.changed
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ account_api_token = dict(required=True, no_log=True, type='str'),
+ account_email = dict(required=True, type='str'),
+ port = dict(required=False, default=None, type='int'),
+ priority = dict(required=False, default=1, type='int'),
+ proto = dict(required=False, default=None, choices=[ 'tcp', 'udp' ], type='str'),
+ proxied = dict(required=False, default=False, type='bool'),
+ record = dict(required=False, default='@', aliases=['name'], type='str'),
+ service = dict(required=False, default=None, type='str'),
+ solo = dict(required=False, default=None, type='bool'),
+ state = dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ timeout = dict(required=False, default=30, type='int'),
+ ttl = dict(required=False, default=1, type='int'),
+ type = dict(required=False, default=None, choices=[ 'A', 'AAAA', 'CNAME', 'TXT', 'SRV', 'MX', 'NS', 'SPF' ], type='str'),
+ value = dict(required=False, default=None, aliases=['content'], type='str'),
+ weight = dict(required=False, default=1, type='int'),
+ zone = dict(required=True, aliases=['domain'], type='str'),
+ ),
+ supports_check_mode = True,
+ required_if = ([
+ ('state','present',['record','type']),
+ ('type','MX',['priority','value']),
+ ('type','SRV',['port','priority','proto','service','value','weight']),
+ ('type','A',['value']),
+ ('type','AAAA',['value']),
+ ('type','CNAME',['value']),
+ ('type','TXT',['value']),
+ ('type','NS',['value']),
+ ('type','SPF',['value'])
+ ]
+ ),
+ required_one_of = (
+ [['record','value','type']]
+ )
+ )
+
+ changed = False
+ cf_api = CloudflareAPI(module)
+
+ # sanity checks
+ if cf_api.is_solo and cf_api.state == 'absent':
+ module.fail_json(msg="solo=true can only be used with state=present")
+
+ # perform add, delete or update (only the TTL can be updated) of one or
+ # more records
+ if cf_api.state == 'present':
+ # delete all records matching record name + type
+ if cf_api.is_solo:
+ changed = cf_api.delete_dns_records(solo=cf_api.is_solo)
+ result,changed = cf_api.ensure_dns_record()
+ if isinstance(result,list):
+ module.exit_json(changed=changed,result={'record': result[0]})
+ else:
+ module.exit_json(changed=changed,result={'record': result})
+ else:
+ # force solo to False, just to be sure
+ changed = cf_api.delete_dns_records(solo=False)
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/dnsimple.py b/network/dnsimple.py
index 5cecfbd8169..3f6c2188b04 100644
--- a/network/dnsimple.py
+++ b/network/dnsimple.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: dnsimple
@@ -97,36 +101,67 @@
'''
EXAMPLES = '''
-# authenticate using email and API token
-- local_action: dnsimple account_email=test@example.com account_api_token=dummyapitoken
-
-# fetch all domains
-- local_action dnsimple
- register: domains
+# authenticate using email and API token and fetch all domains
+- dnsimple:
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+ delegate_to: localhost
+ register: domains
# fetch my.com domain records
-- local_action: dnsimple domain=my.com state=present
+- dnsimple:
+ domain: my.com
+ state: present
+ delegate_to: localhost
register: records
# delete a domain
-- local_action: dnsimple domain=my.com state=absent
+- dnsimple:
+ domain: my.com
+ state: absent
+ delegate_to: localhost
# create a test.my.com A record to point to 127.0.0.1
-- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1
+- dnsimple:
+ domain: my.com
+ record: test
+ type: A
+ value: 127.0.0.1
+ delegate_to: localhost
register: record
# and then delete it
-- local_action: dnsimple domain=my.com record_ids={{ record['id'] }}
+- dnsimple:
+ domain: my.com
+ record_ids: '{{ record["id"] }}'
+ delegate_to: localhost
# create a my.com CNAME record to example.com
-- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present
+- dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ state: present
+ delegate_to: localhost
# change its TTL
-- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present
+- dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ ttl: 600
+ state: present
+ delegate_to: localhost
# and delete the record
-- local_action: dnsimpledomain=my.com record= type=CNAME value=example.com state=absent
-
+- dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ state: absent
+ delegate_to: localhost
'''
import os
@@ -159,7 +194,7 @@ def main():
)
if not HAS_DNSIMPLE:
- module.fail_json("dnsimple required for this module")
+ module.fail_json(msg="dnsimple required for this module")
account_email = module.params.get('account_email')
account_api_token = module.params.get('account_api_token')
@@ -294,12 +329,15 @@ def main():
else:
module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
- except DNSimpleException, e:
+ except DNSimpleException:
+ e = get_exception()
module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)
module.fail_json(msg="Unknown what you wanted me to do")
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
-main()
+if __name__ == '__main__':
+ main()
diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py
index cce7bd10082..7650960e434 100644
--- a/network/dnsmadeeasy.py
+++ b/network/dnsmadeeasy.py
@@ -14,23 +14,27 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: dnsmadeeasy
version_added: "1.3"
short_description: Interface with dnsmadeeasy.com (a DNS hosting service).
description:
- - "Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or monitor/account support yet. See: U(http://www.dnsmadeeasy.com/services/rest-api/)"
+ - "Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)"
options:
account_key:
description:
- - Accout API Key.
+ - Account API Key.
required: true
default: null
account_secret:
description:
- - Accout Secret Key.
+ - Account Secret Key.
required: true
default: null
@@ -92,21 +96,48 @@
EXAMPLES = '''
# fetch my.com domain records
-- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present
+- dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
register: response
# create / ensure the presence of a record
-- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_type="A" record_value="127.0.0.1"
+- dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
# update the previously created record
-- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_value="192.168.0.1"
+- dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_value: 192.0.2.23
# fetch a specific record
-- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test"
+- dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
register: response
# delete a record / ensure it is absent
-- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=absent record_name="test"
+- dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: absent
+ record_name: test
'''
# ============================================
@@ -121,7 +152,8 @@
from time import strftime, gmtime
import hashlib
import hmac
-except ImportError, e:
+except ImportError:
+ e = get_exception()
IMPORT_ERROR = str(e)
class DME2:
@@ -170,7 +202,7 @@ def query(self, resource, method, data=None):
try:
return json.load(response)
- except Exception, e:
+ except Exception:
return {}
def getDomain(self, domain_id):
@@ -204,16 +236,17 @@ def getMatchingRecord(self, record_name, record_type, record_value):
if not self.all_records:
self.all_records = self.getRecords()
- # TODO SRV type not yet implemented
if record_type in ["A", "AAAA", "CNAME", "HTTPRED", "PTR"]:
for result in self.all_records:
if result['name'] == record_name and result['type'] == record_type:
return result
return False
- elif record_type in ["MX", "NS", "TXT"]:
+ elif record_type in ["MX", "NS", "TXT", "SRV"]:
for result in self.all_records:
if record_type == "MX":
value = record_value.split(" ")[1]
+ elif record_type == "SRV":
+ value = record_value.split(" ")[3]
else:
value = record_value
if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
@@ -309,6 +342,13 @@ def main():
new_record["mxLevel"] = new_record["value"].split(" ")[0]
new_record["value"] = new_record["value"].split(" ")[1]
+ # Special handling for SRV records
+ if new_record["type"] == "SRV":
+ new_record["priority"] = new_record["value"].split(" ")[0]
+ new_record["weight"] = new_record["value"].split(" ")[1]
+ new_record["port"] = new_record["value"].split(" ")[2]
+ new_record["value"] = new_record["value"].split(" ")[3]
+
# Compare new record against existing one
changed = False
if current_record:
@@ -357,4 +397,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/network/exoscale/__init__.py b/network/exoscale/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/network/exoscale/exo_dns_domain.py b/network/exoscale/exo_dns_domain.py
new file mode 100644
index 00000000000..b0046c803dc
--- /dev/null
+++ b/network/exoscale/exo_dns_domain.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: exo_dns_domain
+short_description: Manages domains on the Exoscale DNS API.
+description:
+ - Create and remove domains.
+version_added: "2.2"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the domain.
+ required: true
+ state:
+ description:
+ - State of the resource.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ api_key:
+ description:
+ - API key of the Exoscale DNS API.
+ required: false
+ default: null
+ api_secret:
+ description:
+ - Secret key of the Exoscale DNS API.
+ required: false
+ default: null
+ api_timeout:
+ description:
+ - HTTP timeout to Exoscale DNS API.
+ required: false
+ default: 10
+ api_region:
+ description:
+ - Name of the ini section in the C(cloudstack.ini) file.
+ required: false
+ default: cloudstack
+ validate_certs:
+ description:
+ - Validate SSL certs of the Exoscale DNS API.
+ required: false
+ default: true
+requirements:
+ - "python >= 2.6"
+notes:
+ - As Exoscale DNS uses the same API key and secret for all services, we reuse the config used for Exoscale Compute based on CloudStack.
+ The config is read from several locations, in the following order:
+ the C(CLOUDSTACK_KEY) and C(CLOUDSTACK_SECRET) environment variables;
+ a C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file;
+ a C(cloudstack.ini) file in the current working directory;
+ a C(.cloudstack.ini) file in the user's home directory.
+ Optionally, multiple credentials and endpoints can be specified using ini sections in C(cloudstack.ini).
+ Use the argument C(api_region) to select the section name; the default section is C(cloudstack).
+ - More information about Exoscale DNS can be found at U(https://community.exoscale.ch/documentation/dns/).
+ - This module supports check mode and diff.
+'''
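+# A minimal cloudstack.ini sketch matching the notes above (section name and
+# key names are assumptions based on the api_region default):
+#
+# [cloudstack]
+# key = EXOSCALE_API_KEY
+# secret = EXOSCALE_API_SECRET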
+
+EXAMPLES = '''
+# Create a domain.
+- local_action:
+ module: exo_dns_domain
+ name: example.com
+
+# Remove a domain.
+- local_action:
+ module: exo_dns_domain
+ name: example.com
+ state: absent
+'''
+
+RETURN = '''
+---
+exo_dns_domain:
+ description: API domain results
+ returned: success
+ type: dictionary
+ contains:
+ account_id:
+ description: Your account ID
+ returned: success
+ type: int
+ sample: 34569
+ auto_renew:
+ description: Whether domain is auto renewed or not
+ returned: success
+ type: bool
+ sample: false
+ created_at:
+ description: When the domain was created
+ returned: success
+ type: string
+ sample: "2016-08-12T15:24:23.989Z"
+ expires_on:
+ description: When the domain expires
+ returned: success
+ type: string
+ sample: "2016-08-12T15:24:23.989Z"
+ id:
+ description: ID of the domain
+ returned: success
+ type: int
+ sample: "2016-08-12T15:24:23.989Z"
+ lockable:
+ description: Whether the domain is lockable or not
+ returned: success
+ type: bool
+ sample: true
+ name:
+ description: Domain name
+ returned: success
+ type: string
+ sample: example.com
+ record_count:
+ description: Number of records related to this domain
+ returned: success
+ type: int
+ sample: 5
+ registrant_id:
+ description: ID of the registrant
+ returned: success
+ type: int
+ sample: null
+ service_count:
+ description: Number of services
+ returned: success
+ type: int
+ sample: 0
+ state:
+ description: State of the domain
+ returned: success
+ type: string
+ sample: "hosted"
+ token:
+ description: Token
+ returned: success
+ type: string
+ sample: "r4NzTRp6opIeFKfaFYvOd6MlhGyD07jl"
+ unicode_name:
+ description: Domain name as unicode
+ returned: success
+ type: string
+ sample: "example.com"
+ updated_at:
+ description: When the domain was updated last.
+ returned: success
+ type: string
+ sample: "2016-08-12T15:24:23.989Z"
+ user_id:
+ description: ID of the user
+ returned: success
+ type: int
+ sample: null
+ whois_protected:
+ description: Whether the whois is protected or not
+ returned: success
+ type: bool
+ sample: false
+'''
+
+# import exoscale common
+from ansible.module_utils.exoscale import *
+
+
+class ExoDnsDomain(ExoDns):
+
+ def __init__(self, module):
+ super(ExoDnsDomain, self).__init__(module)
+ self.name = self.module.params.get('name').lower()
+
+ def get_domain(self):
+ domains = self.api_query("/domains", "GET")
+ for z in domains:
+ if z['domain']['name'].lower() == self.name:
+ return z
+ return None
+
+ def present_domain(self):
+ domain = self.get_domain()
+ data = {
+ 'domain': {
+ 'name': self.name,
+ }
+ }
+ if not domain:
+ self.result['diff']['after'] = data['domain']
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ domain = self.api_query("/domains", "POST", data)
+ return domain
+
+ def absent_domain(self):
+ domain = self.get_domain()
+ if domain:
+ self.result['diff']['before'] = domain
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.api_query("/domains/%s" % domain['domain']['name'], "DELETE")
+ return domain
+
+ def get_result(self, resource):
+ if resource:
+ self.result['exo_dns_domain'] = resource['domain']
+ return self.result
+
+
+def main():
+ argument_spec = exo_dns_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=exo_dns_required_together(),
+ supports_check_mode=True
+ )
+
+ exo_dns_domain = ExoDnsDomain(module)
+ if module.params.get('state') == "present":
+ resource = exo_dns_domain.present_domain()
+ else:
+ resource = exo_dns_domain.absent_domain()
+ result = exo_dns_domain.get_result(resource)
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/exoscale/exo_dns_record.py b/network/exoscale/exo_dns_record.py
new file mode 100644
index 00000000000..495508d3d47
--- /dev/null
+++ b/network/exoscale/exo_dns_record.py
@@ -0,0 +1,395 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: exo_dns_record
+short_description: Manages DNS records on Exoscale DNS.
+description:
+ - Create, update and delete records.
+version_added: "2.2"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the record.
+ required: false
+ default: ""
+ domain:
+ description:
+ - Domain the record is related to.
+ required: true
+ record_type:
+ description:
+ - Type of the record.
+ required: false
+ default: A
+ choices: ['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']
+ aliases: ['rtype', 'type']
+ content:
+ description:
+ - Content of the record.
+ - Required if C(state=present) or C(name="")
+ required: false
+ default: null
+ aliases: ['value', 'address']
+ ttl:
+ description:
+ - TTL of the record in seconds.
+ required: false
+ default: 3600
+ prio:
+ description:
+ - Priority of the record.
+ required: false
+ default: null
+ aliases: ['priority']
+ multiple:
+ description:
+ - Whether there are more than one record with the same C(name).
+ - Only allowed with C(record_type=A).
+ - C(content) will not be updated as it is used as key to find the record.
+ required: false
+ default: false
+ state:
+ description:
+ - State of the record.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ api_key:
+ description:
+ - API key of the Exoscale DNS API.
+ required: false
+ default: null
+ api_secret:
+ description:
+ - Secret key of the Exoscale DNS API.
+ required: false
+ default: null
+ api_timeout:
+ description:
+ - HTTP timeout to Exoscale DNS API.
+ required: false
+ default: 10
+ api_region:
+ description:
+ - Name of the ini section in the C(cloudstack.ini) file.
+ required: false
+ default: cloudstack
+ validate_certs:
+ description:
+ - Validate SSL certs of the Exoscale DNS API.
+ required: false
+ default: true
+requirements:
+ - "python >= 2.6"
+notes:
+ - As Exoscale DNS uses the same API key and secret for all services, we reuse the config used for Exoscale Compute based on CloudStack.
+ The config is read from several locations, in the following order:
+ the C(CLOUDSTACK_KEY) and C(CLOUDSTACK_SECRET) environment variables;
+ a C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file;
+ a C(cloudstack.ini) file in the current working directory;
+ a C(.cloudstack.ini) file in the user's home directory.
+ Optionally, multiple credentials and endpoints can be specified using ini sections in C(cloudstack.ini).
+ Use the argument C(api_region) to select the section name; the default section is C(cloudstack).
+ - This module does not support multiple A records with the same name unless C(multiple=yes) is set, and will fail properly if you try.
+ - More information about Exoscale DNS can be found at U(https://community.exoscale.ch/documentation/dns/).
+ - This module supports check mode and diff.
+'''
+
+EXAMPLES = '''
+# Create or update an A record.
+- local_action:
+ module: exo_dns_record
+ name: web-vm-1
+ domain: example.com
+ content: 1.2.3.4
+
+# Update an existing A record with a new IP.
+- local_action:
+ module: exo_dns_record
+ name: web-vm-1
+ domain: example.com
+ content: 1.2.3.5
+
+# Create another A record with same name.
+- local_action:
+ module: exo_dns_record
+ name: web-vm-1
+ domain: example.com
+ content: 1.2.3.6
+ multiple: yes
+
+# Create or update a CNAME record.
+- local_action:
+ module: exo_dns_record
+ name: www
+ domain: example.com
+ record_type: CNAME
+ content: web-vm-1
+
+# Create or update an MX record.
+- local_action:
+ module: exo_dns_record
+ domain: example.com
+ record_type: MX
+ content: mx1.example.com
+ prio: 10
+
+# Delete an MX record.
+- local_action:
+ module: exo_dns_record
+ domain: example.com
+ record_type: MX
+ content: mx1.example.com
+ state: absent
+
+# Remove a record.
+- local_action:
+ module: exo_dns_record
+ name: www
+ domain: example.com
+ state: absent
+'''
+
+RETURN = '''
+---
+exo_dns_record:
+ description: API record results
+ returned: success
+ type: dictionary
+ contains:
+ content:
+ description: value of the record
+ returned: success
+ type: string
+ sample: 1.2.3.4
+ created_at:
+ description: When the record was created
+ returned: success
+ type: string
+ sample: "2016-08-12T15:24:23.989Z"
+ domain:
+ description: Name of the domain
+ returned: success
+ type: string
+ sample: example.com
+ domain_id:
+ description: ID of the domain
+ returned: success
+ type: int
+ sample: 254324
+ id:
+ description: ID of the record
+ returned: success
+ type: int
+ sample: 254324
+ name:
+ description: name of the record
+ returned: success
+ type: string
+ sample: www
+ parent_id:
+ description: ID of the parent
+ returned: success
+ type: int
+ sample: null
+ prio:
+ description: Priority of the record
+ returned: success
+ type: int
+ sample: 10
+ record_type:
+ description: Type of the record
+ returned: success
+ type: string
+ sample: A
+ system_record:
+ description: Whether the record is a system record or not
+ returned: success
+ type: bool
+ sample: false
+ ttl:
+ description: Time to live of the record
+ returned: success
+ type: int
+ sample: 3600
+ updated_at:
+ description: When the record was updated
+ returned: success
+ type: string
+ sample: "2016-08-12T15:24:23.989Z"
+'''
+
+# import exoscale common
+from ansible.module_utils.exoscale import *
+
+
+class ExoDnsRecord(ExoDns):
+
+ def __init__(self, module):
+ super(ExoDnsRecord, self).__init__(module)
+
+ self.content = self.module.params.get('content')
+ if self.content:
+ self.content = self.content.lower()
+
+ self.domain = self.module.params.get('domain').lower()
+ self.name = self.module.params.get('name').lower()
+ if self.name == self.domain:
+ self.name = ""
+
+ self.multiple = self.module.params.get('multiple')
+ self.record_type = self.module.params.get('record_type')
+ if self.multiple and self.record_type != 'A':
+ self.module.fail_json("Multiple is only usable with record_type A")
+
+ def _create_record(self, record):
+ self.result['changed'] = True
+ data = {
+ 'record': {
+ 'name': self.name,
+ 'record_type': self.record_type,
+ 'content': self.content,
+ 'ttl': self.module.params.get('ttl'),
+ 'prio': self.module.params.get('prio'),
+ }
+ }
+ self.result['diff']['after'] = data['record']
+ if not self.module.check_mode:
+ record = self.api_query("/domains/%s/records" % self.domain, "POST", data)
+ return record
+
+ def _update_record(self, record):
+ data = {
+ 'record': {
+ 'name': self.name,
+ 'content': self.content,
+ 'ttl': self.module.params.get('ttl'),
+ 'prio': self.module.params.get('prio'),
+ }
+ }
+ if self.has_changed(data['record'], record['record']):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ record = self.api_query("/domains/%s/records/%s" % (self.domain, record['record']['id']), "PUT", data)
+ return record
+
+ def get_record(self):
+ domain = self.module.params.get('domain')
+ records = self.api_query("/domains/%s/records" % domain, "GET")
+
+ record = None
+ for r in records:
+ found_record = None
+ if r['record']['record_type'] == self.record_type:
+ r_name = r['record']['name'].lower()
+ r_content = r['record']['content'].lower()
+
+ # there are multiple A records but we found an exact match
+ if self.multiple and self.name == r_name and self.content == r_content:
+ record = r
+ break
+
+ # We do not expect to find more than one record with that content
+ if not self.multiple and not self.name and self.content == r_content:
+ found_record = r
+
+ # We do not expect to find more than one record with that name
+ elif not self.multiple and self.name and self.name == r_name:
+ found_record = r
+
+ if record and found_record:
+ self.module.fail_json(msg="More than one record with your params. Use multiple=yes for more than one A record.")
+ if found_record:
+ record = found_record
+ return record
+
+ def present_record(self):
+ record = self.get_record()
+ if not record:
+ record = self._create_record(record)
+ else:
+ record = self._update_record(record)
+ return record
+
+ def absent_record(self):
+ record = self.get_record()
+ if record:
+ self.result['diff']['before'] = record
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.api_query("/domains/%s/records/%s" % (self.domain, record['record']['id']), "DELETE")
+ return record
+
+ def get_result(self, resource):
+ if resource:
+ self.result['exo_dns_record'] = resource['record']
+ self.result['exo_dns_record']['domain'] = self.domain
+ return self.result
+
+
+def main():
+ argument_spec = exo_dns_argument_spec()
+ argument_spec.update(dict(
+ name=dict(default=""),
+ record_type=dict(choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL'], aliases=['rtype', 'type'], default='A'),
+ content=dict(aliases=['value', 'address']),
+ multiple=(dict(type='bool', default=False)),
+ ttl=dict(type='int', default=3600),
+ prio=dict(type='int', aliases=['priority']),
+ domain=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=exo_dns_required_together(),
+ required_if=[
+ ['state', 'present', ['content']],
+ ['name', '', ['content']],
+ ],
+ required_one_of=[
+ ['content', 'name'],
+ ],
+ supports_check_mode=True,
+ )
+
+ exo_dns_record = ExoDnsRecord(module)
+ if module.params.get('state') == "present":
+ resource = exo_dns_record.present_record()
+ else:
+ resource = exo_dns_record.absent_record()
+
+ result = exo_dns_record.get_result(resource)
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_device_dns.py b/network/f5/bigip_device_dns.py
new file mode 100644
index 00000000000..a6c1e8e30d7
--- /dev/null
+++ b/network/f5/bigip_device_dns.py
@@ -0,0 +1,403 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_device_dns
+short_description: Manage BIG-IP device DNS settings
+description:
+ - Manage BIG-IP device DNS settings
+version_added: "2.2"
+options:
+ cache:
+ description:
+ - Specifies whether the system caches DNS lookups or performs the
+ operation each time a lookup is needed. Please note that this applies
+ only to Access Policy Manager features, such as ACLs, web application
+ rewrites, and authentication.
+ required: false
+ default: disable
+ choices:
+ - enable
+ - disable
+ name_servers:
+ description:
+ - A list of name servers that the system uses to validate DNS lookups
+ forwarders:
+ description:
+ - A list of BIND servers that the system can use to perform DNS lookups
+ search:
+ description:
+ - A list of domains that the system searches for local domain lookups,
+ to resolve local host names.
+ ip_version:
+ description:
+ - Specifies whether DNS lookups resolve IP addresses using IPv4 or IPv6.
+ required: false
+ choices:
+ - 4
+ - 6
+ state:
+ description:
+ - The state of the variable on the system. When C(present), guarantees
+ that an existing variable is set to C(value).
+ required: false
+ default: present
+ choices:
+ - absent
+ - present
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Set the DNS settings on the BIG-IP
+ bigip_device_dns:
+ name_servers:
+ - 208.67.222.222
+ - 208.67.220.220
+ search:
+ - localdomain
+ - lab.local
+ state: present
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+cache:
+ description: The new value of the DNS caching
+ returned: changed
+ type: string
+ sample: "enabled"
+name_servers:
+ description: List of name servers that were added or removed
+ returned: changed
+ type: list
+ sample: "['192.0.2.10', '172.17.12.10']"
+forwarders:
+ description: List of forwarders that were added or removed
+ returned: changed
+ type: list
+ sample: "['192.0.2.10', '172.17.12.10']"
+search:
+ description: List of search domains that were added or removed
+ returned: changed
+ type: list
+ sample: "['192.0.2.10', '172.17.12.10']"
+ip_version:
+ description: IP version that was set that DNS will specify IP addresses in
+ returned: changed
+ type: int
+ sample: 4
+'''
+
+try:
+ from f5.bigip.contexts import TransactionContextManager
+ from f5.bigip import ManagementRoot
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+REQUIRED = ['name_servers', 'search', 'forwarders', 'ip_version', 'cache']
+CACHE = ['disable', 'enable']
+IP = [4, 6]
+
+
+class BigIpDeviceDns(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def flush(self):
+ result = dict()
+ changed = False
+ state = self.params['state']
+
+ if self.dhcp_enabled():
+ raise F5ModuleError(
+ "DHCP on the mgmt interface must be disabled to make use of " +
+ "this module"
+ )
+
+ if state == 'absent':
+ changed = self.absent()
+ else:
+ changed = self.present()
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+ def dhcp_enabled(self):
+ r = self.api.tm.sys.dbs.db.load(name='dhclient.mgmt')
+ if r.value == 'enable':
+ return True
+ else:
+ return False
+
+ def read(self):
+ result = dict()
+
+ cache = self.api.tm.sys.dbs.db.load(name='dns.cache')
+ proxy = self.api.tm.sys.dbs.db.load(name='dns.proxy.__iter__')
+ dns = self.api.tm.sys.dns.load()
+
+ result['cache'] = str(cache.value)
+ result['forwarders'] = str(proxy.value).split(' ')
+
+ if hasattr(dns, 'nameServers'):
+ result['name_servers'] = dns.nameServers
+ if hasattr(dns, 'search'):
+ result['search'] = dns.search
+ if hasattr(dns, 'include') and 'options inet6' in dns.include:
+ result['ip_version'] = 6
+ else:
+ result['ip_version'] = 4
+ return result
+
+ def present(self):
+ params = dict()
+ current = self.read()
+
+ # Temporary locations to hold the changed params
+ update = dict(
+ dns=None,
+ forwarders=None,
+ cache=None
+ )
+
+ nameservers = self.params['name_servers']
+ search_domains = self.params['search']
+ ip_version = self.params['ip_version']
+ forwarders = self.params['forwarders']
+ cache = self.params['cache']
+ check_mode = self.params['check_mode']
+
+ if nameservers:
+ if 'name_servers' in current:
+ if nameservers != current['name_servers']:
+ params['nameServers'] = nameservers
+ else:
+ params['nameServers'] = nameservers
+
+ if search_domains:
+ if 'search' in current:
+ if search_domains != current['search']:
+ params['search'] = search_domains
+ else:
+ params['search'] = search_domains
+
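+ # assumption mirrored from read() above: IPv6-first resolution is toggled
+ # by the literal string 'options inet6' in the dns 'include' attribute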
+ if ip_version:
+ if 'ip_version' in current:
+ if ip_version != int(current['ip_version']):
+ if ip_version == 6:
+ params['include'] = 'options inet6'
+ elif ip_version == 4:
+ params['include'] = ''
+ else:
+ if ip_version == 6:
+ params['include'] = 'options inet6'
+ elif ip_version == 4:
+ params['include'] = ''
+
+ if params:
+ self.cparams.update(camel_dict_to_snake_dict(params))
+
+ if 'include' in params:
+ del self.cparams['include']
+ if params['include'] == '':
+ self.cparams['ip_version'] = 4
+ else:
+ self.cparams['ip_version'] = 6
+
+ update['dns'] = params.copy()
+ params = dict()
+
+ if forwarders:
+ if 'forwarders' in current:
+ if forwarders != current['forwarders']:
+ params['forwarders'] = forwarders
+ else:
+ params['forwarders'] = forwarders
+
+ if params:
+ self.cparams.update(camel_dict_to_snake_dict(params))
+ update['forwarders'] = ' '.join(params['forwarders'])
+ params = dict()
+
+ if cache:
+ if 'cache' in current:
+ if cache != current['cache']:
+ params['cache'] = cache
+
+ if params:
+ self.cparams.update(camel_dict_to_snake_dict(params))
+ update['cache'] = params['cache']
+ params = dict()
+
+ if self.cparams:
+ changed = True
+ if check_mode:
+ return changed
+ else:
+ return False
+
+ tx = self.api.tm.transactions.transaction
+ with TransactionContextManager(tx) as api:
+ cache = api.tm.sys.dbs.db.load(name='dns.cache')
+ proxy = api.tm.sys.dbs.db.load(name='dns.proxy.__iter__')
+ dns = api.tm.sys.dns.load()
+
+ # Empty values can be supplied, but you cannot supply the
+ # None value, so we check for that specifically
+ if update['cache'] is not None:
+ cache.update(value=update['cache'])
+ if update['forwarders'] is not None:
+ proxy.update(value=update['forwarders'])
+ if update['dns'] is not None:
+ dns.update(**update['dns'])
+ return changed
+
+ def absent(self):
+ params = dict()
+ current = self.read()
+
+ # Temporary locations to hold the changed params
+ update = dict(
+ dns=None,
+ forwarders=None
+ )
+
+ nameservers = self.params['name_servers']
+ search_domains = self.params['search']
+ forwarders = self.params['forwarders']
+ check_mode = self.params['check_mode']
+
+ if forwarders and 'forwarders' in current:
+ set_current = set(current['forwarders'])
+ set_new = set(forwarders)
+
+ forwarders = set_current - set_new
+ if forwarders != set_current:
+ forwarders = list(forwarders)
+ params['forwarders'] = ' '.join(forwarders)
+
+ if params:
+ changed = True
+ self.cparams.update(camel_dict_to_snake_dict(params))
+ update['forwarders'] = params['forwarders']
+ params = dict()
+
+ if nameservers and 'name_servers' in current:
+ set_current = set(current['name_servers'])
+ set_new = set(nameservers)
+
+ nameservers = set_current - set_new
+ if nameservers != set_current:
+ params['nameServers'] = list(nameservers)
+
+ if search_domains and 'search' in current:
+ set_current = set(current['search'])
+ set_new = set(search_domains)
+
+ search_domains = set_current - set_new
+ if search_domains != set_current:
+ params['search'] = list(search_domains)
+
+ if params:
+ changed = True
+ self.cparams.update(camel_dict_to_snake_dict(params))
+ update['dns'] = params.copy()
+ params = dict()
+
+ if not self.cparams:
+ return False
+
+ if check_mode:
+ return changed
+
+ tx = self.api.tm.transactions.transaction
+ with TransactionContextManager(tx) as api:
+ proxy = api.tm.sys.dbs.db.load(name='dns.proxy.__iter__')
+ dns = api.tm.sys.dns.load()
+
+ if update['forwarders'] is not None:
+ proxy.update(value=update['forwarders'])
+ if update['dns'] is not None:
+ dns.update(**update['dns'])
+ return changed
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ cache=dict(required=False, choices=CACHE, default=None),
+ name_servers=dict(required=False, default=None, type='list'),
+ forwarders=dict(required=False, default=None, type='list'),
+ search=dict(required=False, default=None, type='list'),
+ ip_version=dict(required=False, default=None, choices=IP, type='int')
+ )
+ argument_spec.update(meta_args)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[REQUIRED],
+ supports_check_mode=True
+ )
+
+ try:
+ obj = BigIpDeviceDns(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_device_ntp.py b/network/f5/bigip_device_ntp.py
new file mode 100644
index 00000000000..23ed81b7819
--- /dev/null
+++ b/network/f5/bigip_device_ntp.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_device_ntp
+short_description: Manage NTP servers on a BIG-IP
+description:
+ - Manage NTP servers on a BIG-IP
+version_added: "2.2"
+options:
+ ntp_servers:
+ description:
+ - A list of NTP servers to set on the device. At least one of C(ntp_servers)
+ or C(timezone) is required.
+ required: false
+ default: []
+ state:
+ description:
+ - The state of the NTP servers on the system. When C(present), guarantees
+ that the NTP servers are set on the system. When C(absent), removes the
+ specified NTP servers from the device configuration.
+ required: false
+ default: present
+ choices:
+ - absent
+ - present
+ timezone:
+ description:
+ - The timezone to set for NTP lookups. At least one of C(ntp_servers) or
+ C(timezone) is required.
+ default: UTC
+ required: false
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Set NTP server
+ bigip_device_ntp:
+ ntp_servers:
+ - "192.0.2.23"
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
+
+- name: Set timezone
+ bigip_device_ntp:
+ password: "secret"
+ server: "lb.mydomain.com"
+ timezone: "America/Los_Angeles"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+ntp_servers:
+ description: The NTP servers that were set on the device
+ returned: changed
+ type: list
+ sample: ["192.0.2.23", "192.0.2.42"]
+timezone:
+ description: The timezone that was set on the device
+ returned: changed
+ type: string
+ sample: "true"
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+class BigIpDeviceNtp(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def flush(self):
+ result = dict()
+ changed = False
+ state = self.params['state']
+
+ try:
+ if state == "present":
+ changed = self.present()
+ elif state == "absent":
+ changed = self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ if 'servers' in self.cparams:
+ self.cparams['ntp_servers'] = self.cparams.pop('servers')
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+ """
+ p = dict()
+ r = self.api.tm.sys.ntp.load()
+
+ if hasattr(r, 'servers'):
+ # Deliberately using sets to suppress duplicates
+ p['servers'] = set([str(x) for x in r.servers])
+ if hasattr(r, 'timezone'):
+ p['timezone'] = str(r.timezone)
+ return p
+
+ def present(self):
+ changed = False
+ params = dict()
+ current = self.read()
+
+ check_mode = self.params['check_mode']
+ ntp_servers = self.params['ntp_servers']
+ timezone = self.params['timezone']
+
+ # NTP servers can be set independently
+ if ntp_servers is not None:
+ if 'servers' in current:
+ items = set(ntp_servers)
+ if items != current['servers']:
+ params['servers'] = list(ntp_servers)
+ else:
+ params['servers'] = ntp_servers
+
+ # Timezone can be set independently
+ if timezone is not None:
+ if 'timezone' in current and current['timezone'] != timezone:
+ params['timezone'] = timezone
+
+ if params:
+ changed = True
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return changed
+ else:
+ return changed
+
+ r = self.api.tm.sys.ntp.load()
+ r.update(**params)
+ r.refresh()
+
+ return changed
+
+ def absent(self):
+ changed = False
+ params = dict()
+ current = self.read()
+
+ check_mode = self.params['check_mode']
+ ntp_servers = self.params['ntp_servers']
+
+ if not ntp_servers:
+ raise F5ModuleError(
+ "Absent can only be used when removing NTP servers"
+ )
+
+ if ntp_servers and 'servers' in current:
+ servers = current['servers']
+ new_servers = [x for x in servers if x not in ntp_servers]
+
+ # current['servers'] is a set, so compare like types here
+ if set(new_servers) != servers:
+ params['servers'] = new_servers
+
+ if params:
+ changed = True
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return changed
+ else:
+ return changed
+
+ r = self.api.tm.sys.ntp.load()
+ r.update(**params)
+ r.refresh()
+ return changed
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ ntp_servers=dict(required=False, type='list', default=None),
+ timezone=dict(default=None, required=False)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[
+ ['ntp_servers', 'timezone']
+ ],
+ supports_check_mode=True
+ )
+
+ try:
+ obj = BigIpDeviceNtp(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_device_sshd.py b/network/f5/bigip_device_sshd.py
new file mode 100644
index 00000000000..87ffeb6bee0
--- /dev/null
+++ b/network/f5/bigip_device_sshd.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_device_sshd
+short_description: Manage the SSHD settings of a BIG-IP
+description:
+ - Manage the SSHD settings of a BIG-IP
+version_added: "2.2"
+options:
+ allow:
+ description:
+ - Specifies, if you have enabled SSH access, the IP address or address
+ range for other systems that can use SSH to communicate with this
+ system.
+ choices:
+ - all
+ - IP address, such as 172.27.1.10
+ - IP range, such as 172.27.*.* or 172.27.0.0/255.255.0.0
+ banner:
+ description:
+ - Whether to enable the banner or not.
+ required: false
+ choices:
+ - enabled
+ - disabled
+ banner_text:
+ description:
+ - Specifies the text to include on the pre-login banner that displays
+ when a user attempts to login to the system using SSH.
+ required: false
+ inactivity_timeout:
+ description:
+ - Specifies the number of seconds before inactivity causes an SSH
+ session to log out.
+ required: false
+ log_level:
+ description:
+ - Specifies the minimum SSHD message level to include in the system log.
+ choices:
+ - debug
+ - debug1
+ - debug2
+ - debug3
+ - error
+ - fatal
+ - info
+ - quiet
+ - verbose
+ login:
+ description:
+ - Specifies, when C(enabled), that the system accepts SSH
+ communications.
+ choices:
+ - enabled
+ - disabled
+ required: false
+ port:
+ description:
+ - Port that you want the SSH daemon to run on.
+ required: false
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+ - Requires BIG-IP version 12.0.0 or greater.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Set the banner for the SSHD service from a string
+ bigip_device_sshd:
+ banner: "enabled"
+ banner_text: "banner text goes here"
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ delegate_to: localhost
+
+- name: Set the banner for the SSHD service from a file
+ bigip_device_sshd:
+ banner: "enabled"
+ banner_text: "{{ lookup('file', '/path/to/file') }}"
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ delegate_to: localhost
+
+- name: Set the SSHD service to run on port 2222
+ bigip_device_sshd:
+ password: "secret"
+ port: 2222
+ server: "lb.mydomain.com"
+ user: "admin"
+ delegate_to: localhost
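+
+# Illustrative sketch: setting the idle timeout and log level together
+# (the values below are arbitrary examples)
+- name: Set a 10 minute inactivity timeout and verbose logging
+ bigip_device_sshd:
+ inactivity_timeout: 600
+ log_level: "verbose"
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ delegate_to: localhost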
+'''
+
+RETURN = '''
+allow:
+ description: >
+ Specifies, if you have enabled SSH access, the IP address or address
+ range for other systems that can use SSH to communicate with this
+ system.
+ returned: changed
+ type: string
+ sample: "192.0.2.*"
+banner:
+ description: Whether the banner is enabled or not.
+ returned: changed
+ type: string
+ sample: "true"
+banner_text:
+ description: >
+ Specifies the text included on the pre-login banner that
+ displays when a user attempts to login to the system using SSH.
+ returned: changed and success
+ type: string
+ sample: "This is a corporate device. Connecting to it without..."
+inactivity_timeout:
+ description: >
+ The number of seconds before inactivity causes an SSH
+ session to log out.
+ returned: changed
+ type: int
+ sample: "10"
+log_level:
+ description: The minimum SSHD message level to include in the system log.
+ returned: changed
+ type: string
+ sample: "debug"
+login:
+ description: Whether the system accepts SSH communications or not.
+ returned: changed
+ type: bool
+ sample: true
+port:
+ description: Port that you want the SSH daemon to run on.
+ returned: changed
+ type: int
+ sample: 22
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+CHOICES = ['enabled', 'disabled']
+LEVELS = ['debug', 'debug1', 'debug2', 'debug3', 'error', 'fatal', 'info',
+ 'quiet', 'verbose']
+
+
+class BigIpDeviceSshd(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def update(self):
+ changed = False
+ current = self.read()
+ params = dict()
+
+ allow = self.params['allow']
+ banner = self.params['banner']
+ banner_text = self.params['banner_text']
+ timeout = self.params['inactivity_timeout']
+ log_level = self.params['log_level']
+ login = self.params['login']
+ port = self.params['port']
+ check_mode = self.params['check_mode']
+
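+ # For each setting, stage an update only when the desired value differs
+ # from what the device currently reports, or when the device reports no
+ # value at all for that setting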
+ if allow:
+ if 'allow' in current:
+ items = set(allow)
+ if items != current['allow']:
+ params['allow'] = list(items)
+ else:
+ params['allow'] = allow
+
+ if banner:
+ if 'banner' in current:
+ if banner != current['banner']:
+ params['banner'] = banner
+ else:
+ params['banner'] = banner
+
+ if banner_text:
+ if 'banner_text' in current:
+ if banner_text != current['banner_text']:
+ params['bannerText'] = banner_text
+ else:
+ params['bannerText'] = banner_text
+
+ if timeout:
+ if 'inactivity_timeout' in current:
+ if timeout != current['inactivity_timeout']:
+ params['inactivityTimeout'] = timeout
+ else:
+ params['inactivityTimeout'] = timeout
+
+ if log_level:
+ if 'log_level' in current:
+ if log_level != current['log_level']:
+ params['logLevel'] = log_level
+ else:
+ params['logLevel'] = log_level
+
+ if login:
+ if 'login' in current:
+ if login != current['login']:
+ params['login'] = login
+ else:
+ params['login'] = login
+
+ if port:
+ if 'port' in current:
+ if port != current['port']:
+ params['port'] = port
+ else:
+ params['port'] = port
+
+ if params:
+ changed = True
+ if check_mode:
+ return changed
+ self.cparams = camel_dict_to_snake_dict(params)
+ else:
+ return changed
+
+ r = self.api.tm.sys.sshd.load()
+ r.update(**params)
+ r.refresh()
+
+ return changed
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+ """
+ p = dict()
+ r = self.api.tm.sys.sshd.load()
+
+ if hasattr(r, 'allow'):
+ # Deliberately using sets to suppress duplicates
+ p['allow'] = set([str(x) for x in r.allow])
+ if hasattr(r, 'banner'):
+ p['banner'] = str(r.banner)
+ if hasattr(r, 'bannerText'):
+ p['banner_text'] = str(r.bannerText)
+ if hasattr(r, 'inactivityTimeout'):
+ # Keep as int so it compares cleanly with the module's int parameter
+ p['inactivity_timeout'] = int(r.inactivityTimeout)
+ if hasattr(r, 'logLevel'):
+ p['log_level'] = str(r.logLevel)
+ if hasattr(r, 'login'):
+ p['login'] = str(r.login)
+ if hasattr(r, 'port'):
+ p['port'] = int(r.port)
+ return p
+
+ def flush(self):
+ result = dict()
+ changed = False
+
+ try:
+ changed = self.update()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ allow=dict(required=False, default=None, type='list'),
+ banner=dict(required=False, default=None, choices=CHOICES),
+ banner_text=dict(required=False, default=None),
+ inactivity_timeout=dict(required=False, default=None, type='int'),
+ log_level=dict(required=False, default=None, choices=LEVELS),
+ login=dict(required=False, default=None, choices=CHOICES),
+ port=dict(required=False, default=None, type='int'),
+ state=dict(default='present', choices=['present'])
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ try:
+ obj = BigIpDeviceSshd(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py
index 1b106ba0a3e..33d5e1937e6 100644
--- a/network/f5/bigip_facts.py
+++ b/network/f5/bigip_facts.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-
+#
# (c) 2013, Matt Hite
#
# This file is part of Ansible
@@ -18,101 +18,85 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: bigip_facts
-short_description: "Collect facts from F5 BIG-IP devices"
+short_description: Collect facts from F5 BIG-IP devices
description:
- - "Collect facts from F5 BIG-IP devices via iControl SOAP API"
+ - Collect facts from F5 BIG-IP devices via iControl SOAP API
version_added: "1.6"
-author: "Matt Hite (@mhite)"
+author:
+ - Matt Hite (@mhite)
+ - Tim Rupp (@caphrim007)
notes:
- - "Requires BIG-IP software version >= 11.4"
- - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- - "Best run as a local_action in your playbook"
- - "Tested with manager and above account privilege level"
-
+ - Requires BIG-IP software version >= 11.4
+ - F5 developed module 'bigsuds' required (see http://devcentral.f5.com)
+ - Best run as a local_action in your playbook
+ - Tested with manager and above account privilege level
+ - C(provision) facts were added in 2.2
requirements:
- - bigsuds
+ - bigsuds
options:
- server:
- description:
- - BIG-IP host
- required: true
- default: null
- choices: []
- aliases: []
- user:
- description:
- - BIG-IP username
- required: true
- default: null
- choices: []
- aliases: []
- password:
- description:
- - BIG-IP password
- required: true
- default: null
- choices: []
- aliases: []
- validate_certs:
- description:
- - If C(no), SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
- required: false
- default: 'yes'
- choices: ['yes', 'no']
- version_added: 2.0
- session:
- description:
- - BIG-IP session support; may be useful to avoid concurrency
- issues in certain circumstances.
- required: false
- default: true
- choices: []
- aliases: []
- include:
- description:
- - Fact category or list of categories to collect
- required: true
- default: null
- choices: ['address_class', 'certificate', 'client_ssl_profile',
- 'device', 'device_group', 'interface', 'key', 'node', 'pool',
- 'rule', 'self_ip', 'software', 'system_info', 'traffic_group',
- 'trunk', 'virtual_address', 'virtual_server', 'vlan']
- aliases: []
- filter:
- description:
- - Shell-style glob matching string used to filter fact keys. Not
- applicable for software and system_info fact categories.
- required: false
- default: null
- choices: []
- aliases: []
+ session:
+ description:
+ - BIG-IP session support; may be useful to avoid concurrency
+ issues in certain circumstances.
+ required: false
+ default: true
+ choices: []
+ aliases: []
+ include:
+ description:
+ - Fact category or list of categories to collect
+ required: true
+ default: null
+ choices:
+ - address_class
+ - certificate
+ - client_ssl_profile
+ - device
+ - device_group
+ - interface
+ - key
+ - node
+ - pool
+ - provision
+ - rule
+ - self_ip
+ - software
+ - system_info
+ - traffic_group
+ - trunk
+ - virtual_address
+ - virtual_server
+ - vlan
+ aliases: []
+ filter:
+ description:
+ - Shell-style glob matching string used to filter fact keys. Not
+ applicable for software, provision, and system_info fact categories.
+ required: false
+ default: null
+ choices: []
+ aliases: []
+extends_documentation_fragment: f5
'''
EXAMPLES = '''
-
-## playbook task examples:
-
----
-# file bigip-test.yml
-# ...
-- hosts: bigip-test
- tasks:
- - name: Collect BIG-IP facts
- local_action: >
- bigip_facts
- server=lb.mydomain.com
- user=admin
- password=mysecret
- include=interface,vlan
-
+- name: Collect BIG-IP facts
+ bigip_facts:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ include: "interface,vlan"
+ delegate_to: localhost
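+
+# Illustrative sketch: glob-filter the returned fact keys (the pattern
+# below is an arbitrary example)
+- name: Collect only VLAN facts for objects under /Common
+ bigip_facts:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ include: "vlan"
+ filter: "/Common/*"
+ delegate_to: localhost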
'''
try:
- import bigsuds
from suds import MethodNotFound, WebFault
except ImportError:
bigsuds_found = False
@@ -120,12 +104,9 @@
bigsuds_found = True
import fnmatch
-import traceback
import re
+import traceback
-# ===========================================
-# bigip_facts module specific support methods.
-#
class F5(object):
"""F5 iControl class.
@@ -136,8 +117,8 @@ class F5(object):
api: iControl API instance.
"""
- def __init__(self, host, user, password, session=False):
- self.api = bigsuds.BIGIP(hostname=host, username=user, password=password)
+ def __init__(self, host, user, password, session=False, validate_certs=True, port=443):
+ self.api = bigip_api(host, user, password, validate_certs, port)
if session:
self.start_session()
@@ -967,6 +948,7 @@ def get_verification_status(self):
def get_definition(self):
return [x['rule_definition'] for x in self.api.LocalLB.Rule.query_rule(rule_names=self.rules)]
+
class Nodes(object):
"""Nodes class.
@@ -1101,7 +1083,7 @@ def get_list(self):
def get_address_class(self):
key = self.api.LocalLB.Class.get_address_class(self.address_classes)
value = self.api.LocalLB.Class.get_address_class_member_data_value(key)
- result = map(zip, [x['members'] for x in key], value)
+ result = list(map(zip, [x['members'] for x in key], value))
return result
def get_description(self):
@@ -1364,6 +1346,35 @@ def get_uptime(self):
return self.api.System.SystemInfo.get_uptime()
+class ProvisionInfo(object):
+ """Provision information class.
+
+ F5 BIG-IP provision information class.
+
+ Attributes:
+ api: iControl API instance.
+ """
+
+ def __init__(self, api):
+ self.api = api
+
+ def get_list(self):
+ result = []
+ modules = self.api.Management.Provision.get_list()
+ for item in modules:
+ item = item.lower().replace('tmos_module_', '')
+ result.append(item)
+ return result
+
+ def get_provisioned_list(self):
+ result = []
+ list = self.api.Management.Provision.get_provisioned_list()
+ for item in list:
+ item = item.lower().replace('tmos_module_', '')
+ result.append(item)
+ return result
+
+
def generate_dict(api_obj, fields):
result_dict = {}
lists = []
@@ -1383,6 +1394,7 @@ def generate_dict(api_obj, fields):
result_dict[j] = temp
return result_dict
+
def generate_simple_dict(api_obj, fields):
result_dict = {}
for field in fields:
@@ -1394,6 +1406,7 @@ def generate_simple_dict(api_obj, fields):
result_dict[field] = api_response
return result_dict
+
def generate_interface_dict(f5, regex):
interfaces = Interfaces(f5.get_api(), regex)
fields = ['active_media', 'actual_flow_control', 'bundle_state',
@@ -1408,6 +1421,7 @@ def generate_interface_dict(f5, regex):
'stp_protocol_detection_reset_state']
return generate_dict(interfaces, fields)
+
def generate_self_ip_dict(f5, regex):
self_ips = SelfIPs(f5.get_api(), regex)
fields = ['address', 'allow_access_list', 'description',
@@ -1416,6 +1430,7 @@ def generate_self_ip_dict(f5, regex):
'vlan', 'is_traffic_group_inherited']
return generate_dict(self_ips, fields)
+
def generate_trunk_dict(f5, regex):
trunks = Trunks(f5.get_api(), regex)
fields = ['active_lacp_state', 'configured_member_count', 'description',
@@ -1425,6 +1440,7 @@ def generate_trunk_dict(f5, regex):
'stp_protocol_detection_reset_state']
return generate_dict(trunks, fields)
+
def generate_vlan_dict(f5, regex):
vlans = Vlans(f5.get_api(), regex)
fields = ['auto_lasthop', 'cmp_hash_algorithm', 'description',
@@ -1436,6 +1452,7 @@ def generate_vlan_dict(f5, regex):
'source_check_state', 'true_mac_address', 'vlan_id']
return generate_dict(vlans, fields)
+
def generate_vs_dict(f5, regex):
virtual_servers = VirtualServers(f5.get_api(), regex)
fields = ['actual_hardware_acceleration', 'authentication_profile',
@@ -1456,6 +1473,7 @@ def generate_vs_dict(f5, regex):
'translate_port_state', 'type', 'vlan', 'wildmask']
return generate_dict(virtual_servers, fields)
+
def generate_pool_dict(f5, regex):
pools = Pools(f5.get_api(), regex)
fields = ['action_on_service_down', 'active_member_count',
@@ -1472,6 +1490,7 @@ def generate_pool_dict(f5, regex):
'simple_timeout', 'slow_ramp_time']
return generate_dict(pools, fields)
+
def generate_device_dict(f5, regex):
devices = Devices(f5.get_api(), regex)
fields = ['active_modules', 'base_mac_address', 'blade_addresses',
@@ -1484,14 +1503,16 @@ def generate_device_dict(f5, regex):
'timelimited_modules', 'timezone', 'unicast_addresses']
return generate_dict(devices, fields)
+
def generate_device_group_dict(f5, regex):
device_groups = DeviceGroups(f5.get_api(), regex)
- fields = ['all_preferred_active', 'autosync_enabled_state','description',
+ fields = ['all_preferred_active', 'autosync_enabled_state', 'description',
'device', 'full_load_on_sync_state',
'incremental_config_sync_size_maximum',
'network_failover_enabled_state', 'sync_status', 'type']
return generate_dict(device_groups, fields)
+
def generate_traffic_group_dict(f5, regex):
traffic_groups = TrafficGroups(f5.get_api(), regex)
fields = ['auto_failback_enabled_state', 'auto_failback_time',
@@ -1500,12 +1521,14 @@ def generate_traffic_group_dict(f5, regex):
'unit_id']
return generate_dict(traffic_groups, fields)
+
def generate_rule_dict(f5, regex):
rules = Rules(f5.get_api(), regex)
fields = ['definition', 'description', 'ignore_vertification',
'verification_status']
return generate_dict(rules, fields)
+
def generate_node_dict(f5, regex):
nodes = Nodes(f5.get_api(), regex)
fields = ['address', 'connection_limit', 'description', 'dynamic_ratio',
@@ -1513,6 +1536,7 @@ def generate_node_dict(f5, regex):
'object_status', 'rate_limit', 'ratio', 'session_status']
return generate_dict(nodes, fields)
+
def generate_virtual_address_dict(f5, regex):
virtual_addresses = VirtualAddresses(f5.get_api(), regex)
fields = ['address', 'arp_state', 'auto_delete_state', 'connection_limit',
@@ -1521,19 +1545,23 @@ def generate_virtual_address_dict(f5, regex):
'route_advertisement_state', 'traffic_group']
return generate_dict(virtual_addresses, fields)
+
def generate_address_class_dict(f5, regex):
address_classes = AddressClasses(f5.get_api(), regex)
fields = ['address_class', 'description']
return generate_dict(address_classes, fields)
+
def generate_certificate_dict(f5, regex):
certificates = Certificates(f5.get_api(), regex)
return dict(zip(certificates.get_list(), certificates.get_certificate_list()))
+
def generate_key_dict(f5, regex):
keys = Keys(f5.get_api(), regex)
return dict(zip(keys.get_list(), keys.get_key_list()))
+
def generate_client_ssl_profile_dict(f5, regex):
profiles = ProfileClientSSL(f5.get_api(), regex)
fields = ['alert_timeout', 'allow_nonssl_state', 'authenticate_depth',
@@ -1557,6 +1585,7 @@ def generate_client_ssl_profile_dict(f5, regex):
'unclean_shutdown_state', 'is_base_profile', 'is_system_profile']
return generate_dict(profiles, fields)
+
def generate_system_info_dict(f5):
system_info = SystemInfo(f5.get_api())
fields = ['base_mac_address',
@@ -1569,62 +1598,68 @@ def generate_system_info_dict(f5):
'time_zone', 'uptime']
return generate_simple_dict(system_info, fields)
+
def generate_software_list(f5):
software = Software(f5.get_api())
software_list = software.get_all_software_status()
return software_list
-def disable_ssl_cert_validation():
- # You probably only want to do this for testing and never in production.
- # From https://www.python.org/dev/peps/pep-0476/#id29
- import ssl
- ssl._create_default_https_context = ssl._create_unverified_context
+
+def generate_provision_dict(f5):
+ provisioned = ProvisionInfo(f5.get_api())
+ fields = ['list', 'provisioned_list']
+ return generate_simple_dict(provisioned, fields)
def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ session=dict(type='bool', default=False),
+ include=dict(type='list', required=True),
+ filter=dict(type='str', required=False),
+ )
+ argument_spec.update(meta_args)
+
module = AnsibleModule(
- argument_spec = dict(
- server = dict(type='str', required=True),
- user = dict(type='str', required=True),
- password = dict(type='str', required=True),
- validate_certs = dict(default='yes', type='bool'),
- session = dict(type='bool', default=False),
- include = dict(type='list', required=True),
- filter = dict(type='str', required=False),
- )
+ argument_spec=argument_spec
)
if not bigsuds_found:
- module.fail_json(msg="the python suds and bigsuds modules is required")
+ module.fail_json(msg="the python suds and bigsuds modules are required")
server = module.params['server']
+ server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
validate_certs = module.params['validate_certs']
session = module.params['session']
fact_filter = module.params['filter']
+
+ if validate_certs:
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
if fact_filter:
regex = fnmatch.translate(fact_filter)
else:
regex = None
- include = map(lambda x: x.lower(), module.params['include'])
+ include = [x.lower() for x in module.params['include']]
valid_includes = ('address_class', 'certificate', 'client_ssl_profile',
'device', 'device_group', 'interface', 'key', 'node',
- 'pool', 'rule', 'self_ip', 'software', 'system_info',
- 'traffic_group', 'trunk', 'virtual_address',
- 'virtual_server', 'vlan')
+ 'pool', 'provision', 'rule', 'self_ip', 'software',
+ 'system_info', 'traffic_group', 'trunk',
+ 'virtual_address', 'virtual_server', 'vlan')
include_test = map(lambda x: x in valid_includes, include)
if not all(include_test):
module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include)))
- if not validate_certs:
- disable_ssl_cert_validation()
-
try:
facts = {}
if len(include) > 0:
- f5 = F5(server, user, password, session)
+ f5 = F5(server, user, password, session, validate_certs, server_port)
saved_active_folder = f5.get_active_folder()
saved_recursive_query_state = f5.get_recursive_query_state()
if saved_active_folder != "/":
@@ -1644,6 +1679,8 @@ def main():
facts['virtual_server'] = generate_vs_dict(f5, regex)
if 'pool' in include:
facts['pool'] = generate_pool_dict(f5, regex)
+ if 'provision' in include:
+ facts['provision'] = generate_provision_dict(f5)
if 'device' in include:
facts['device'] = generate_device_dict(f5, regex)
if 'device_group' in include:
@@ -1678,14 +1715,14 @@ def main():
result = {'ansible_facts': facts}
- except Exception, e:
+ except Exception as e:
module.fail_json(msg="received exception: %s\ntraceback: %s" % (e, traceback.format_exc()))
module.exit_json(**result)
# include magic from lib/ansible/module_common.py
from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
-
diff --git a/network/f5/bigip_gtm_datacenter.py b/network/f5/bigip_gtm_datacenter.py
new file mode 100644
index 00000000000..fff876007cf
--- /dev/null
+++ b/network/f5/bigip_gtm_datacenter.py
@@ -0,0 +1,372 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_gtm_datacenter
+short_description: Manage Datacenter configuration in BIG-IP
+description:
+ - Manage BIG-IP data center configuration. A data center defines the location
+ where the physical network components reside, such as the server and link
+ objects that share the same subnet on the network. This module is able to
+ manipulate the data center definitions in a BIG-IP.
+version_added: "2.2"
+options:
+ contact:
+ description:
+ - The name of the contact for the data center.
+ description:
+ description:
+ - The description of the data center.
+ enabled:
+ description:
+ - Whether the data center should be enabled. At least one of C(state) and
+ C(enabled) is required.
+ choices:
+ - yes
+ - no
+ location:
+ description:
+ - The location of the data center.
+ name:
+ description:
+ - The name of the data center.
+ required: true
+ state:
+ description:
+ - The state of the datacenter on the BIG-IP. When C(present), guarantees
+ that the data center exists. When C(absent), removes the data center
+ from the BIG-IP. C(enabled) will enable the data center and C(disabled)
+ will ensure the data center is disabled. At least one of C(state) and
+ C(enabled) is required.
+ choices:
+ - present
+ - absent
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as
+ pip install f5-sdk.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Create data center "New York"
+ bigip_gtm_datacenter:
+ server: "big-ip"
+ name: "New York"
+ location: "222 West 23rd"
+ delegate_to: localhost
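+
+# Illustrative sketch: disabling the same data center (reuses the name
+# from the task above)
+- name: Disable data center "New York"
+ bigip_gtm_datacenter:
+ server: "big-ip"
+ name: "New York"
+ enabled: "no"
+ delegate_to: localhost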
+'''
+
+RETURN = '''
+contact:
+ description: The contact that was set on the datacenter
+ returned: changed
+ type: string
+ sample: "admin@root.local"
+description:
+ description: The description that was set for the datacenter
+ returned: changed
+ type: string
+ sample: "Datacenter in NYC"
+enabled:
+ description: Whether the datacenter is enabled or not
+ returned: changed
+ type: bool
+ sample: true
+location:
+ description: The location that is set for the datacenter
+ returned: changed
+ type: string
+ sample: "222 West 23rd"
+name:
+ description: Name of the datacenter being manipulated
+ returned: changed
+ type: string
+ sample: "foo"
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+class BigIpGtmDatacenter(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def create(self):
+ params = dict()
+
+ check_mode = self.params['check_mode']
+ contact = self.params['contact']
+ description = self.params['description']
+ location = self.params['location']
+ name = self.params['name']
+ partition = self.params['partition']
+ enabled = self.params['enabled']
+
+ # Specifically check for None because a person could supply empty
+ # values which would technically still be valid
+ if contact is not None:
+ params['contact'] = contact
+
+ if description is not None:
+ params['description'] = description
+
+ if location is not None:
+ params['location'] = location
+
+ if enabled is not None:
+ if enabled:
+ params['enabled'] = True
+ else:
+ params['disabled'] = True
+
+ params['name'] = name
+ params['partition'] = partition
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return True
+
+ d = self.api.tm.gtm.datacenters.datacenter
+ d.create(**params)
+
+ if not self.exists():
+ raise F5ModuleError("Failed to create the datacenter")
+ return True
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+ """
+ p = dict()
+ name = self.params['name']
+ partition = self.params['partition']
+ r = self.api.tm.gtm.datacenters.datacenter.load(
+ name=name,
+ partition=partition
+ )
+
+ if hasattr(r, 'servers'):
+ # Deliberately using sets to suppress duplicates
+ p['servers'] = set([str(x) for x in r.servers])
+ if hasattr(r, 'contact'):
+ p['contact'] = str(r.contact)
+ if hasattr(r, 'location'):
+ p['location'] = str(r.location)
+ if hasattr(r, 'description'):
+ p['description'] = str(r.description)
+ if r.enabled:
+ p['enabled'] = True
+ else:
+ p['enabled'] = False
+ p['name'] = name
+ return p
+
+ def update(self):
+ changed = False
+ params = dict()
+ current = self.read()
+
+ check_mode = self.params['check_mode']
+ contact = self.params['contact']
+ description = self.params['description']
+ location = self.params['location']
+ name = self.params['name']
+ partition = self.params['partition']
+ enabled = self.params['enabled']
+
+ if contact is not None:
+ if 'contact' in current:
+ if contact != current['contact']:
+ params['contact'] = contact
+ else:
+ params['contact'] = contact
+
+ if description is not None:
+ if 'description' in current:
+ if description != current['description']:
+ params['description'] = description
+ else:
+ params['description'] = description
+
+ if location is not None:
+ if 'location' in current:
+ if location != current['location']:
+ params['location'] = location
+ else:
+ params['location'] = location
+
+ if enabled is not None:
+ if current['enabled'] != enabled:
+ if enabled is True:
+ params['enabled'] = True
+ params['disabled'] = False
+ else:
+ params['disabled'] = True
+ params['enabled'] = False
+
+ if params:
+ changed = True
+ if check_mode:
+ return changed
+ self.cparams = camel_dict_to_snake_dict(params)
+ else:
+ return changed
+
+ r = self.api.tm.gtm.datacenters.datacenter.load(
+ name=name,
+ partition=partition
+ )
+ r.update(**params)
+ r.refresh()
+
+ return True
+
+ def delete(self):
+ params = dict()
+ check_mode = self.params['check_mode']
+
+ params['name'] = self.params['name']
+ params['partition'] = self.params['partition']
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return True
+
+ dc = self.api.tm.gtm.datacenters.datacenter.load(**params)
+ dc.delete()
+
+ if self.exists():
+ raise F5ModuleError("Failed to delete the datacenter")
+ return True
+
+ def present(self):
+ changed = False
+
+ if self.exists():
+ changed = self.update()
+ else:
+ changed = self.create()
+
+ return changed
+
+ def absent(self):
+ changed = False
+
+ if self.exists():
+ changed = self.delete()
+
+ return changed
+
+ def exists(self):
+ name = self.params['name']
+ partition = self.params['partition']
+
+ return self.api.tm.gtm.datacenters.datacenter.exists(
+ name=name,
+ partition=partition
+ )
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+ enabled = self.params['enabled']
+
+ if state is None and enabled is None:
+ module.fail_json(msg="Neither 'state' nor 'enabled' set")
+
+ try:
+ if state == "present":
+ changed = self.present()
+
+ # Ensure that this field is not returned to the user since it
+ # is not a valid parameter to the module.
+ if 'disabled' in self.cparams:
+ del self.cparams['disabled']
+ elif state == "absent":
+ changed = self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ contact=dict(required=False, default=None),
+ description=dict(required=False, default=None),
+ enabled=dict(required=False, type='bool', default=None, choices=BOOLEANS),
+ location=dict(required=False, default=None),
+ name=dict(required=True)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ try:
+ obj = BigIpGtmDatacenter(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_gtm_facts.py b/network/f5/bigip_gtm_facts.py
new file mode 100644
index 00000000000..9e3fc8b492f
--- /dev/null
+++ b/network/f5/bigip_gtm_facts.py
@@ -0,0 +1,495 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_gtm_facts
+short_description: Collect facts from F5 BIG-IP GTM devices.
+description:
+ - Collect facts from F5 BIG-IP GTM devices.
+version_added: "2.3"
+options:
+ include:
+ description:
+ - Fact category or list of categories to collect
+ required: true
+ choices:
+ - pool
+ - wide_ip
+ - virtual_server
+ filter:
+ description:
+ - Perform regex filter of response. Filtering is done on the name of
+ the resource. Valid filters are anything that can be provided to
+ Python's C(re) module.
+ required: false
+ default: None
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as
+ pip install f5-sdk.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Get pool facts
+ bigip_gtm_facts:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ include: "pool"
+ filter: "my_pool"
+ delegate_to: localhost
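+
+# Illustrative sketch: collecting several categories in one task with no
+# name filter
+- name: Get wide_ip and virtual_server facts
+ bigip_gtm_facts:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ include: "wide_ip,virtual_server"
+ delegate_to: localhost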
+'''
+
+RETURN = '''
+wide_ip:
+ description:
+ Contains the lb method for the wide ip and the pools
+ that are within the wide ip.
+ returned: changed
+ type: dict
+ sample:
+ wide_ip:
+ - enabled: "True"
+ failure_rcode: "noerror"
+ failure_rcode_response: "disabled"
+ failure_rcode_ttl: "0"
+ full_path: "/Common/foo.ok.com"
+ last_resort_pool: ""
+ minimal_response: "enabled"
+ name: "foo.ok.com"
+ partition: "Common"
+ persist_cidr_ipv4: "32"
+ persist_cidr_ipv6: "128"
+ persistence: "disabled"
+ pool_lb_mode: "round-robin"
+ pools:
+ - name: "d3qw"
+ order: "0"
+ partition: "Common"
+ ratio: "1"
+ ttl_persistence: "3600"
+ type: "naptr"
+pool:
+ description: Contains the pool object status and enabled status.
+ returned: changed
+ type: dict
+ sample:
+ pool:
+ - alternate_mode: "round-robin"
+ dynamic_ratio: "disabled"
+ enabled: "True"
+ fallback_mode: "return-to-dns"
+ full_path: "/Common/d3qw"
+ load_balancing_mode: "round-robin"
+ manual_resume: "disabled"
+ max_answers_returned: "1"
+ members:
+ - disabled: "True"
+ flags: "a"
+ full_path: "ok3.com"
+ member_order: "0"
+ name: "ok3.com"
+ order: "10"
+ preference: "10"
+ ratio: "1"
+ service: "80"
+ name: "d3qw"
+ partition: "Common"
+ qos_hit_ratio: "5"
+ qos_hops: "0"
+ qos_kilobytes_second: "3"
+ qos_lcs: "30"
+ qos_packet_rate: "1"
+ qos_rtt: "50"
+ qos_topology: "0"
+ qos_vs_capacity: "0"
+ qos_vs_score: "0"
+ ttl: "30"
+ type: "naptr"
+ verify_member_availability: "disabled"
+virtual_server:
+ description:
+ Contains the virtual server's enabled and availability
+ status, as well as its address.
+ returned: changed
+ type: dict
+ sample:
+ virtual_server:
+ - addresses:
+ - device_name: "/Common/qweqwe"
+ name: "10.10.10.10"
+ translation: "none"
+ datacenter: "/Common/xfxgh"
+ enabled: "True"
+ expose_route_domains: "no"
+ full_path: "/Common/qweqwe"
+ iq_allow_path: "yes"
+ iq_allow_service_check: "yes"
+ iq_allow_snmp: "yes"
+ limit_cpu_usage: "0"
+ limit_cpu_usage_status: "disabled"
+ limit_max_bps: "0"
+ limit_max_bps_status: "disabled"
+ limit_max_connections: "0"
+ limit_max_connections_status: "disabled"
+ limit_max_pps: "0"
+ limit_max_pps_status: "disabled"
+ limit_mem_avail: "0"
+ limit_mem_avail_status: "disabled"
+ link_discovery: "disabled"
+ monitor: "/Common/bigip "
+ name: "qweqwe"
+ partition: "Common"
+ product: "single-bigip"
+ virtual_server_discovery: "disabled"
+ virtual_servers:
+ - destination: "10.10.10.10:0"
+ enabled: "True"
+ full_path: "jsdfhsd"
+ limit_max_bps: "0"
+ limit_max_bps_status: "disabled"
+ limit_max_connections: "0"
+ limit_max_connections_status: "disabled"
+ limit_max_pps: "0"
+ limit_max_pps_status: "disabled"
+ name: "jsdfhsd"
+ translation_address: "none"
+ translation_port: "0"
+'''
+
+try:
+ from distutils.version import LooseVersion
+ from f5.bigip.contexts import TransactionContextManager
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+import re
+
+
+class BigIpGtmFactsCommon(object):
+ def __init__(self):
+ self.api = None
+ self.attributes_to_remove = [
+ 'kind', 'generation', 'selfLink', '_meta_data',
+ 'membersReference', 'datacenterReference',
+ 'virtualServersReference', 'nameReference'
+ ]
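+ # Maps the f5-sdk collection attribute name to the DNS record type it
+ # holds (e.g. the 'a_s' collection yields 'a' records)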
+ self.gtm_types = dict(
+ a_s='a',
+ aaaas='aaaa',
+ cnames='cname',
+ mxs='mx',
+ naptrs='naptr',
+ srvs='srv'
+ )
+ self.request_params = dict(
+ params='expandSubcollections=true'
+ )
+
+ def is_version_less_than_12(self):
+ version = self.api.tmos_version
+ if LooseVersion(version) < LooseVersion('12.0.0'):
+ return True
+ else:
+ return False
+
+ def format_string_facts(self, parameters):
+ result = dict()
+ for attribute in self.attributes_to_remove:
+ parameters.pop(attribute, None)
+ for key, val in parameters.items():
+ result[key] = str(val)
+ return result
+
+ def filter_matches_name(self, name):
+ if not self.params['filter']:
+ return True
+ matches = re.match(self.params['filter'], str(name))
+ if matches:
+ return True
+ else:
+ return False
+
+ def get_facts_from_collection(self, collection, collection_type=None):
+ results = []
+ for item in collection:
+ if not self.filter_matches_name(item.name):
+ continue
+ facts = self.format_facts(item, collection_type)
+ results.append(facts)
+ return results
+
+ def connect_to_bigip(self, **kwargs):
+ return ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+
+class BigIpGtmFactsPools(BigIpGtmFactsCommon):
+ def __init__(self, *args, **kwargs):
+ super(BigIpGtmFactsPools, self).__init__()
+ self.params = kwargs
+
+ def get_facts(self):
+ self.api = self.connect_to_bigip(**self.params)
+ return self.get_facts_from_device()
+
+ def get_facts_from_device(self):
+ try:
+ if self.is_version_less_than_12():
+ return self.get_facts_without_types()
+ else:
+ return self.get_facts_with_types()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ def get_facts_with_types(self):
+ result = []
+ for key, type in self.gtm_types.items():
+ facts = self.get_all_facts_by_type(key, type)
+ if facts:
+ result.append(facts)
+ return result
+
+ def get_facts_without_types(self):
+ pools = self.api.tm.gtm.pools.get_collection(**self.request_params)
+ return self.get_facts_from_collection(pools)
+
+ def get_all_facts_by_type(self, key, type):
+ collection = getattr(self.api.tm.gtm.pools, key)
+ pools = collection.get_collection(**self.request_params)
+ return self.get_facts_from_collection(pools, type)
+
+ def format_facts(self, pool, collection_type):
+ result = dict()
+ pool_dict = pool.to_dict()
+ result.update(self.format_string_facts(pool_dict))
+ result.update(self.format_member_facts(pool))
+ if collection_type:
+ result['type'] = collection_type
+ return camel_dict_to_snake_dict(result)
+
+ def format_member_facts(self, pool):
+ result = []
+ if 'items' not in pool.membersReference:
+ return dict(members=[])
+ for member in pool.membersReference['items']:
+ member_facts = self.format_string_facts(member)
+ result.append(member_facts)
+ return dict(members=result)
+
+
+class BigIpGtmFactsWideIps(BigIpGtmFactsCommon):
+ def __init__(self, *args, **kwargs):
+ super(BigIpGtmFactsWideIps, self).__init__()
+ self.params = kwargs
+
+ def get_facts(self):
+ self.api = self.connect_to_bigip(**self.params)
+ return self.get_facts_from_device()
+
+ def get_facts_from_device(self):
+ try:
+ if self.is_version_less_than_12():
+ return self.get_facts_without_types()
+ else:
+ return self.get_facts_with_types()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ def get_facts_with_types(self):
+ result = []
+ for key, type in self.gtm_types.items():
+ facts = self.get_all_facts_by_type(key, type)
+ if facts:
+ result.append(facts)
+ return result
+
+ def get_facts_without_types(self):
+ wideips = self.api.tm.gtm.wideips.get_collection(
+ **self.request_params
+ )
+ return self.get_facts_from_collection(wideips)
+
+ def get_all_facts_by_type(self, key, type):
+ collection = getattr(self.api.tm.gtm.wideips, key)
+ wideips = collection.get_collection(**self.request_params)
+ return self.get_facts_from_collection(wideips, type)
+
+ def format_facts(self, wideip, collection_type):
+ result = dict()
+ wideip_dict = wideip.to_dict()
+ result.update(self.format_string_facts(wideip_dict))
+ result.update(self.format_pool_facts(wideip))
+ if collection_type:
+ result['type'] = collection_type
+ return camel_dict_to_snake_dict(result)
+
+ def format_pool_facts(self, wideip):
+ result = []
+ if not hasattr(wideip, 'pools'):
+ return dict(pools=[])
+ for pool in wideip.pools:
+ pool_facts = self.format_string_facts(pool)
+ result.append(pool_facts)
+ return dict(pools=result)
+
+
+class BigIpGtmFactsVirtualServers(BigIpGtmFactsCommon):
+ def __init__(self, *args, **kwargs):
+ super(BigIpGtmFactsVirtualServers, self).__init__()
+ self.params = kwargs
+
+ def get_facts(self):
+ try:
+ self.api = self.connect_to_bigip(**self.params)
+ return self.get_facts_from_device()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ def get_facts_from_device(self):
+ servers = self.api.tm.gtm.servers.get_collection(
+ **self.request_params
+ )
+ return self.get_facts_from_collection(servers)
+
+ def format_facts(self, server, collection_type=None):
+ result = dict()
+ server_dict = server.to_dict()
+ result.update(self.format_string_facts(server_dict))
+ result.update(self.format_address_facts(server))
+ result.update(self.format_virtual_server_facts(server))
+ return camel_dict_to_snake_dict(result)
+
+ def format_address_facts(self, server):
+ result = []
+ if not hasattr(server, 'addresses'):
+ return dict(addresses=[])
+ for address in server.addresses:
+ address_facts = self.format_string_facts(address)
+ result.append(address_facts)
+ return dict(addresses=result)
+
+ def format_virtual_server_facts(self, server):
+ result = []
+ if 'items' not in server.virtualServersReference:
+ return dict(virtual_servers=[])
+ for server in server.virtualServersReference['items']:
+ server_facts = self.format_string_facts(server)
+ result.append(server_facts)
+ return dict(virtual_servers=result)
+
+
+class BigIpGtmFactsManager(object):
+ def __init__(self, *args, **kwargs):
+ self.params = kwargs
+ self.api = None
+
+ def get_facts(self):
+ result = dict()
+ facts = dict()
+
+ if 'pool' in self.params['include']:
+ facts['pool'] = self.get_pool_facts()
+ if 'wide_ip' in self.params['include']:
+ facts['wide_ip'] = self.get_wide_ip_facts()
+ if 'virtual_server' in self.params['include']:
+ facts['virtual_server'] = self.get_virtual_server_facts()
+
+ result.update(**facts)
+ result.update(dict(changed=True))
+ return result
+
+ def get_pool_facts(self):
+ pools = BigIpGtmFactsPools(**self.params)
+ return pools.get_facts()
+
+ def get_wide_ip_facts(self):
+ wide_ips = BigIpGtmFactsWideIps(**self.params)
+ return wide_ips.get_facts()
+
+ def get_virtual_server_facts(self):
+ wide_ips = BigIpGtmFactsVirtualServers(**self.params)
+ return wide_ips.get_facts()
+
+
+class BigIpGtmFactsModuleConfig(object):
+ def __init__(self):
+ self.argument_spec = dict()
+ self.meta_args = dict()
+ self.supports_check_mode = False
+ self.valid_includes = ['pool', 'wide_ip', 'virtual_server']
+ self.initialize_meta_args()
+ self.initialize_argument_spec()
+
+ def initialize_meta_args(self):
+ args = dict(
+ include=dict(type='list', required=True),
+ filter=dict(type='str', required=False)
+ )
+ self.meta_args = args
+
+ def initialize_argument_spec(self):
+ self.argument_spec = f5_argument_spec()
+ self.argument_spec.update(self.meta_args)
+
+ def create(self):
+ return AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=self.supports_check_mode
+ )
+
+
+def main():
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ config = BigIpGtmFactsModuleConfig()
+ module = config.create()
+
+ try:
+ obj = BigIpGtmFactsManager(
+ check_mode=module.check_mode, **module.params
+ )
+ result = obj.get_facts()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_gtm_virtual_server.py b/network/f5/bigip_gtm_virtual_server.py
new file mode 100644
index 00000000000..03be3a9df64
--- /dev/null
+++ b/network/f5/bigip_gtm_virtual_server.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Michael Perzel
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_gtm_virtual_server
+short_description: "Manages F5 BIG-IP GTM virtual servers"
+description:
+ - "Manages F5 BIG-IP GTM virtual servers"
+version_added: "2.2"
+author:
+ - Michael Perzel (@perzizzle)
+ - Tim Rupp (@caphrim007)
+notes:
+ - "Requires BIG-IP software version >= 11.4"
+ - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+ - "Best run as a local_action in your playbook"
+ - "Tested with manager and above account privilege level"
+
+requirements:
+ - bigsuds
+options:
+ state:
+ description:
+ - Virtual server state
+ required: false
+ default: present
+ choices: ['present', 'absent', 'enabled', 'disabled']
+ virtual_server_name:
+ description:
+ - Virtual server name
+ required: True
+ virtual_server_server:
+ description:
+ - Virtual server server
+ required: true
+ host:
+ description:
+ - Virtual server host
+ required: false
+ default: None
+ aliases: ['address']
+ port:
+ description:
+ - Virtual server port
+ required: false
+ default: None
+extends_documentation_fragment: f5
+'''
+
+EXAMPLES = '''
+ - name: Enable virtual server
+ local_action: >
+ bigip_gtm_virtual_server
+ server=192.0.2.1
+ user=admin
+ password=mysecret
+ virtual_server_name=myname
+ virtual_server_server=myserver
+ state=enabled
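+
+ # Illustrative sketch: creating a virtual server; host and port are
+ # required for creation (the values below are arbitrary examples)
+ - name: Create virtual server
+ local_action: >
+ bigip_gtm_virtual_server
+ server=192.0.2.1
+ user=admin
+ password=mysecret
+ virtual_server_name=myname
+ virtual_server_server=myserver
+ host=10.10.10.10
+ port=80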
+'''
+
+RETURN = '''# '''
+
+try:
+ import bigsuds
+except ImportError:
+ bigsuds_found = False
+else:
+ bigsuds_found = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.f5 import bigip_api, f5_argument_spec
+
+
+def server_exists(api, server):
+ # hack to determine if virtual server exists
+ result = False
+ try:
+ api.GlobalLB.Server.get_object_status([server])
+ result = True
+ except bigsuds.OperationFailed:
+ e = get_exception()
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def virtual_server_exists(api, name, server):
+ # hack to determine if virtual server exists
+ result = False
+ try:
+ virtual_server_id = {'name': name, 'server': server}
+ api.GlobalLB.VirtualServerV2.get_object_status([virtual_server_id])
+ result = True
+ except bigsuds.OperationFailed:
+ e = get_exception()
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def add_virtual_server(api, virtual_server_name, virtual_server_server, address, port):
+ addresses = {'address': address, 'port': port}
+ virtual_server_id = {'name': virtual_server_name, 'server': virtual_server_server}
+ api.GlobalLB.VirtualServerV2.create([virtual_server_id], [addresses])
+
+
+def remove_virtual_server(api, virtual_server_name, virtual_server_server):
+ virtual_server_id = {'name': virtual_server_name, 'server': virtual_server_server}
+ api.GlobalLB.VirtualServerV2.delete_virtual_server([virtual_server_id])
+
+
+def get_virtual_server_state(api, name, server):
+ virtual_server_id = {'name': name, 'server': server}
+ state = api.GlobalLB.VirtualServerV2.get_enabled_state([virtual_server_id])
+ state = state[0].split('STATE_')[1].lower()
+ return state
+
+
+def set_virtual_server_state(api, name, server, state):
+ virtual_server_id = {'name': name, 'server': server}
+ state = "STATE_%s" % state.strip().upper()
+ api.GlobalLB.VirtualServerV2.set_enabled_state([virtual_server_id], [state])
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ host=dict(type='str', default=None, aliases=['address']),
+ port=dict(type='int', default=None),
+ virtual_server_name=dict(type='str', required=True),
+ virtual_server_server=dict(type='str', required=True)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not bigsuds_found:
+ module.fail_json(msg="the python bigsuds module is required")
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ validate_certs = module.params['validate_certs']
+ user = module.params['user']
+ password = module.params['password']
+ virtual_server_name = module.params['virtual_server_name']
+ virtual_server_server = module.params['virtual_server_server']
+ state = module.params['state']
+ address = module.params['host']
+ port = module.params['port']
+
+ result = {'changed': False} # default
+
+ try:
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
+
+ if state == 'absent':
+ if virtual_server_exists(api, virtual_server_name, virtual_server_server):
+ if not module.check_mode:
+ remove_virtual_server(api, virtual_server_name, virtual_server_server)
+ result = {'changed': True}
+ else:
+ # check-mode return value
+ result = {'changed': True}
+ elif state == 'present':
+ if virtual_server_name and virtual_server_server and address and port:
+ if not virtual_server_exists(api, virtual_server_name, virtual_server_server):
+ if not module.check_mode:
+ if server_exists(api, virtual_server_server):
+ add_virtual_server(api, virtual_server_name, virtual_server_server, address, port)
+ result = {'changed': True}
+ else:
+ module.fail_json(msg="server does not exist")
+ else:
+ # check-mode return value
+ result = {'changed': True}
+ else:
+ # virtual server exists -- potentially modify attributes --future feature
+ result = {'changed': False}
+ else:
+ module.fail_json(msg="Address and port are required to create virtual server")
+ elif state == 'enabled':
+ if not virtual_server_exists(api, virtual_server_name, virtual_server_server):
+ module.fail_json(msg="virtual server does not exist")
+ if state != get_virtual_server_state(api, virtual_server_name, virtual_server_server):
+ if not module.check_mode:
+ set_virtual_server_state(api, virtual_server_name, virtual_server_server, state)
+ result = {'changed': True}
+ else:
+ result = {'changed': True}
+ elif state == 'disabled':
+ if not virtual_server_exists(api, virtual_server_name, virtual_server_server):
+ module.fail_json(msg="virtual server does not exist")
+ if state != get_virtual_server_state(api, virtual_server_name, virtual_server_server):
+ if not module.check_mode:
+ set_virtual_server_state(api, virtual_server_name, virtual_server_server, state)
+ result = {'changed': True}
+ else:
+ result = {'changed': True}
+
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="received exception: %s" % e)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_gtm_wide_ip.py b/network/f5/bigip_gtm_wide_ip.py
new file mode 100644
index 00000000000..c1712902f40
--- /dev/null
+++ b/network/f5/bigip_gtm_wide_ip.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Michael Perzel
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_gtm_wide_ip
+short_description: "Manages F5 BIG-IP GTM wide ip"
+description:
+ - "Manages F5 BIG-IP GTM wide ip"
+version_added: "2.0"
+author:
+ - Michael Perzel (@perzizzle)
+ - Tim Rupp (@caphrim007)
+notes:
+ - "Requires BIG-IP software version >= 11.4"
+ - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+ - "Best run as a local_action in your playbook"
+ - "Tested with manager and above account privilege level"
+
+requirements:
+ - bigsuds
+options:
+ lb_method:
+ description:
+ - LB method of wide ip
+ required: true
+ choices: ['return_to_dns', 'null', 'round_robin',
+ 'ratio', 'topology', 'static_persist', 'global_availability',
+ 'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops',
+ 'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
+ 'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score']
+ wide_ip:
+ description:
+ - Wide IP name
+ required: true
+extends_documentation_fragment: f5
+'''
+
+EXAMPLES = '''
+- name: Set lb method
+  bigip_gtm_wide_ip:
+    server: "192.0.2.1"
+    user: "admin"
+    password: "mysecret"
+    lb_method: "round_robin"
+    wide_ip: "my-wide-ip.example.com"
+  delegate_to: localhost
+'''
+
+try:
+ import bigsuds
+except ImportError:
+ bigsuds_found = False
+else:
+ bigsuds_found = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.f5 import bigip_api, f5_argument_spec
+
+
+def get_wide_ip_lb_method(api, wide_ip):
+ lb_method = api.GlobalLB.WideIP.get_lb_method(wide_ips=[wide_ip])[0]
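+    # e.g. 'LB_METHOD_ROUND_ROBIN' -> 'round_robin', matching lb_method choices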
+ lb_method = lb_method.strip().replace('LB_METHOD_', '').lower()
+ return lb_method
+
+def get_wide_ip_pools(api, wide_ip):
+    # Do not print from a module; stray stdout corrupts the JSON reply.
+    # Let any API failure propagate to the caller's exception handler.
+    return api.GlobalLB.WideIP.get_wideip_pool([wide_ip])
+
+def wide_ip_exists(api, wide_ip):
+ # hack to determine if wide_ip exists
+ result = False
+ try:
+ api.GlobalLB.WideIP.get_object_status(wide_ips=[wide_ip])
+ result = True
+ except bigsuds.OperationFailed:
+ e = get_exception()
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+def set_wide_ip_lb_method(api, wide_ip, lb_method):
+ lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
+ api.GlobalLB.WideIP.set_lb_method(wide_ips=[wide_ip], lb_methods=[lb_method])
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ lb_method_choices = ['return_to_dns', 'null', 'round_robin',
+ 'ratio', 'topology', 'static_persist', 'global_availability',
+ 'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops',
+ 'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
+ 'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score']
+ meta_args = dict(
+        lb_method=dict(type='str', required=True, choices=lb_method_choices),
+        wide_ip=dict(type='str', required=True)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not bigsuds_found:
+ module.fail_json(msg="the python bigsuds module is required")
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ wide_ip = module.params['wide_ip']
+ lb_method = module.params['lb_method']
+ validate_certs = module.params['validate_certs']
+
+ result = {'changed': False} # default
+
+ try:
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
+
+ if not wide_ip_exists(api, wide_ip):
+ module.fail_json(msg="wide ip %s does not exist" % wide_ip)
+
+ if lb_method is not None and lb_method != get_wide_ip_lb_method(api, wide_ip):
+ if not module.check_mode:
+ set_wide_ip_lb_method(api, wide_ip, lb_method)
+ result = {'changed': True}
+ else:
+ result = {'changed': True}
+
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="received exception: %s" % e)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_hostname.py b/network/f5/bigip_hostname.py
new file mode 100644
index 00000000000..9dc9d085c5a
--- /dev/null
+++ b/network/f5/bigip_hostname.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_hostname
+short_description: Manage the hostname of a BIG-IP.
+description:
+ - Manage the hostname of a BIG-IP.
+version_added: "2.3"
+options:
+ hostname:
+ description:
+ - Hostname of the BIG-IP host.
+ required: true
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Set the hostname of the BIG-IP
+ bigip_hostname:
+ hostname: "bigip.localhost.localdomain"
+ password: "admin"
+ server: "bigip.localhost.localdomain"
+ user: "admin"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+hostname:
+ description: The new hostname of the device
+ returned: changed
+ type: string
+ sample: "big-ip01.internal"
+'''
+
+try:
+ from f5.bigip.contexts import TransactionContextManager
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+class BigIpHostnameManager(object):
+ def __init__(self, *args, **kwargs):
+ self.changed_params = dict()
+ self.params = kwargs
+ self.api = None
+
+ def connect_to_bigip(self, **kwargs):
+ return ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def ensure_hostname_is_present(self):
+ self.changed_params['hostname'] = self.params['hostname']
+
+ if self.params['check_mode']:
+ return True
+
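+        # The transaction is committed when the 'with' block exits, so the
+        # hostname update is applied atomically.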
+ tx = self.api.tm.transactions.transaction
+ with TransactionContextManager(tx) as api:
+ r = api.tm.sys.global_settings.load()
+ r.update(hostname=self.params['hostname'])
+
+ if self.hostname_exists():
+ return True
+ else:
+ raise F5ModuleError("Failed to set the hostname")
+
+ def hostname_exists(self):
+ if self.params['hostname'] == self.current_hostname():
+ return True
+ else:
+ return False
+
+ def present(self):
+ if self.hostname_exists():
+ return False
+        else:
+            return self.ensure_hostname_is_present()
+
+ def current_hostname(self):
+ r = self.api.tm.sys.global_settings.load()
+ return r.hostname
+
+ def apply_changes(self):
+ result = dict()
+
+ changed = self.apply_to_running_config()
+ if changed:
+ self.save_running_config()
+
+ result.update(**self.changed_params)
+ result.update(dict(changed=changed))
+ return result
+
+ def apply_to_running_config(self):
+ try:
+ self.api = self.connect_to_bigip(**self.params)
+ return self.present()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ def save_running_config(self):
+ self.api.tm.sys.config.exec_cmd('save')
+
+
+class BigIpHostnameModuleConfig(object):
+ def __init__(self):
+ self.argument_spec = dict()
+ self.meta_args = dict()
+ self.supports_check_mode = True
+
+ self.initialize_meta_args()
+ self.initialize_argument_spec()
+
+ def initialize_meta_args(self):
+ args = dict(
+ hostname=dict(required=True)
+ )
+ self.meta_args = args
+
+ def initialize_argument_spec(self):
+ self.argument_spec = f5_argument_spec()
+ self.argument_spec.update(self.meta_args)
+
+ def create(self):
+ return AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=self.supports_check_mode
+ )
+
+
+def main():
+    config = BigIpHostnameModuleConfig()
+    module = config.create()
+
+    # fail via AnsibleModule so a missing SDK is reported cleanly instead
+    # of as an unhandled traceback
+    if not HAS_F5SDK:
+        module.fail_json(msg="The python f5-sdk module is required")
+
+ try:
+ obj = BigIpHostnameManager(
+ check_mode=module.check_mode, **module.params
+ )
+ result = obj.apply_changes()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_irule.py b/network/f5/bigip_irule.py
new file mode 100644
index 00000000000..52b8f30fb58
--- /dev/null
+++ b/network/f5/bigip_irule.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_irule
+short_description: Manage iRules across different modules on a BIG-IP.
+description:
+ - Manage iRules across different modules on a BIG-IP.
+version_added: "2.2"
+options:
+ content:
+ description:
+ - When used instead of 'src', sets the contents of an iRule directly to
+ the specified value. This is for simple values, but can be used with
+ lookup plugins for anything complex or with formatting. Either one
+ of C(src) or C(content) must be provided.
+ module:
+ description:
+ - The BIG-IP module to add the iRule to.
+ required: true
+ choices:
+ - ltm
+ - gtm
+ partition:
+ description:
+ - The partition to create the iRule on.
+ required: false
+ default: Common
+ name:
+ description:
+ - The name of the iRule.
+ required: true
+ src:
+ description:
+ - The iRule file to interpret and upload to the BIG-IP. Either one
+ of C(src) or C(content) must be provided.
+ required: true
+ state:
+ description:
+ - Whether the iRule should exist or not.
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as
+ pip install f5-sdk.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Add the iRule contained in templated irule.tcl to the LTM module
+ bigip_irule:
+ content: "{{ lookup('template', 'irule-template.tcl') }}"
+ module: "ltm"
+ name: "MyiRule"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "present"
+ user: "admin"
+ delegate_to: localhost
+
+- name: Add the iRule contained in static file irule.tcl to the LTM module
+ bigip_irule:
+ module: "ltm"
+ name: "MyiRule"
+ password: "secret"
+ server: "lb.mydomain.com"
+ src: "irule-static.tcl"
+ state: "present"
+ user: "admin"
+ delegate_to: localhost
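+
+# A removal task would look like this (illustrative values); neither
+# 'content' nor 'src' is needed when state is absent.
+- name: Remove the iRule from the LTM module
+  bigip_irule:
+    module: "ltm"
+    name: "MyiRule"
+    password: "secret"
+    server: "lb.mydomain.com"
+    state: "absent"
+    user: "admin"
+  delegate_to: localhost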
+'''
+
+RETURN = '''
+module:
+ description: The module that the iRule was added to
+ returned: changed and success
+ type: string
+ sample: "gtm"
+src:
+ description: The filename that included the iRule source
+ returned: changed and success, when provided
+ type: string
+ sample: "/opt/src/irules/example1.tcl"
+name:
+ description: The name of the iRule that was managed
+ returned: changed and success
+ type: string
+ sample: "my-irule"
+content:
+ description: The content of the iRule that was managed
+ returned: changed and success
+ type: string
+ sample: "when LB_FAILED { set wipHost [LB::server addr] }"
+partition:
+ description: The partition in which the iRule was managed
+ returned: changed and success
+ type: string
+ sample: "Common"
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+MODULES = ['gtm', 'ltm']
+
+
+class BigIpiRule(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ if kwargs['state'] != 'absent':
+ if not kwargs['content'] and not kwargs['src']:
+ raise F5ModuleError(
+ "Either 'content' or 'src' must be provided"
+ )
+
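+            # normalize: read a 'src' file into 'content' so the rest of
+            # the class only has to deal with 'content'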
+ source = kwargs['src']
+ if source:
+ with open(source) as f:
+ kwargs['content'] = f.read()
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+
+ try:
+ if state == "present":
+ changed = self.present()
+ elif state == "absent":
+ changed = self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+ """
+ p = dict()
+ name = self.params['name']
+ partition = self.params['partition']
+ module = self.params['module']
+
+ if module == 'ltm':
+ r = self.api.tm.ltm.rules.rule.load(
+ name=name,
+ partition=partition
+ )
+ elif module == 'gtm':
+ r = self.api.tm.gtm.rules.rule.load(
+ name=name,
+ partition=partition
+ )
+
+ if hasattr(r, 'apiAnonymous'):
+ p['content'] = str(r.apiAnonymous.strip())
+ p['name'] = name
+ return p
+
+ def delete(self):
+ params = dict()
+ check_mode = self.params['check_mode']
+ module = self.params['module']
+
+ params['name'] = self.params['name']
+ params['partition'] = self.params['partition']
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return True
+
+ if module == 'ltm':
+ r = self.api.tm.ltm.rules.rule.load(**params)
+ r.delete()
+ elif module == 'gtm':
+ r = self.api.tm.gtm.rules.rule.load(**params)
+ r.delete()
+
+ if self.exists():
+ raise F5ModuleError("Failed to delete the iRule")
+ return True
+
+ def exists(self):
+ name = self.params['name']
+ partition = self.params['partition']
+ module = self.params['module']
+
+ if module == 'ltm':
+ return self.api.tm.ltm.rules.rule.exists(
+ name=name,
+ partition=partition
+ )
+ elif module == 'gtm':
+ return self.api.tm.gtm.rules.rule.exists(
+ name=name,
+ partition=partition
+ )
+
+ def present(self):
+ if self.exists():
+ return self.update()
+ else:
+ return self.create()
+
+ def update(self):
+ params = dict()
+ current = self.read()
+ changed = False
+
+ check_mode = self.params['check_mode']
+ content = self.params['content']
+ name = self.params['name']
+ partition = self.params['partition']
+ module = self.params['module']
+
+ if content is not None:
+ content = content.strip()
+ if 'content' in current:
+ if content != current['content']:
+ params['apiAnonymous'] = content
+ else:
+ params['apiAnonymous'] = content
+
+ if params:
+ changed = True
+ params['name'] = name
+ params['partition'] = partition
+ self.cparams = camel_dict_to_snake_dict(params)
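+            # camel_dict_to_snake_dict renders 'apiAnonymous' as
+            # 'api_anonymous'; rename it to 'content' to match the
+            # module parameter reported back to the user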
+ if 'api_anonymous' in self.cparams:
+ self.cparams['content'] = self.cparams.pop('api_anonymous')
+ if self.params['src']:
+ self.cparams['src'] = self.params['src']
+
+ if check_mode:
+ return changed
+ else:
+ return changed
+
+ if module == 'ltm':
+ d = self.api.tm.ltm.rules.rule.load(
+ name=name,
+ partition=partition
+ )
+ d.update(**params)
+ d.refresh()
+ elif module == 'gtm':
+ d = self.api.tm.gtm.rules.rule.load(
+ name=name,
+ partition=partition
+ )
+ d.update(**params)
+ d.refresh()
+
+ return True
+
+ def create(self):
+ params = dict()
+
+ check_mode = self.params['check_mode']
+ content = self.params['content']
+ name = self.params['name']
+ partition = self.params['partition']
+ module = self.params['module']
+
+ if content is not None:
+ params['apiAnonymous'] = content.strip()
+
+ params['name'] = name
+ params['partition'] = partition
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if 'api_anonymous' in self.cparams:
+ self.cparams['content'] = self.cparams.pop('api_anonymous')
+ if self.params['src']:
+ self.cparams['src'] = self.params['src']
+
+ if check_mode:
+ return True
+
+ if module == 'ltm':
+ d = self.api.tm.ltm.rules.rule
+ d.create(**params)
+ elif module == 'gtm':
+ d = self.api.tm.gtm.rules.rule
+ d.create(**params)
+
+ if not self.exists():
+ raise F5ModuleError("Failed to create the iRule")
+ return True
+
+ def absent(self):
+ changed = False
+
+ if self.exists():
+ changed = self.delete()
+
+ return changed
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ content=dict(required=False, default=None),
+ src=dict(required=False, default=None),
+ name=dict(required=True),
+ module=dict(required=True, choices=MODULES)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['content', 'src']
+ ]
+ )
+
+ try:
+ obj = BigIpiRule(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_monitor_http.py b/network/f5/bigip_monitor_http.py
index ea24e995e27..02017569c8c 100644
--- a/network/f5/bigip_monitor_http.py
+++ b/network/f5/bigip_monitor_http.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-
+#
# (c) 2013, serge van Ginderachter
# based on Matt Hite's bigip_pool module
# (c) 2013, Matt Hite
@@ -20,156 +20,141 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: bigip_monitor_http
short_description: "Manages F5 BIG-IP LTM http monitors"
description:
- - "Manages F5 BIG-IP LTM monitors via iControl SOAP API"
+ - Manages F5 BIG-IP LTM monitors via iControl SOAP API
version_added: "1.4"
-author: "Serge van Ginderachter (@srvg)"
+author:
+ - Serge van Ginderachter (@srvg)
+ - Tim Rupp (@caphrim007)
notes:
- - "Requires BIG-IP software version >= 11"
- - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- - "Best run as a local_action in your playbook"
- - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
+ - "Requires BIG-IP software version >= 11"
+ - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+ - "Best run as a local_action in your playbook"
+ - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
requirements:
- - bigsuds
+ - bigsuds
options:
- server:
- description:
- - BIG-IP host
- required: true
- default: null
- user:
- description:
- - BIG-IP username
- required: true
- default: null
- password:
- description:
- - BIG-IP password
- required: true
- default: null
- validate_certs:
- description:
- - If C(no), SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
- required: false
- default: 'yes'
- choices: ['yes', 'no']
- version_added: 2.0
- state:
- description:
- - Monitor state
- required: false
- default: 'present'
- choices: ['present', 'absent']
- name:
- description:
- - Monitor name
- required: true
- default: null
- aliases: ['monitor']
- partition:
- description:
- - Partition for the monitor
- required: false
- default: 'Common'
- parent:
- description:
- - The parent template of this monitor template
- required: false
- default: 'http'
- parent_partition:
- description:
- - Partition for the parent monitor
- required: false
- default: 'Common'
- send:
- description:
- - The send string for the monitor call
- required: true
- default: none
- receive:
- description:
- - The receive string for the monitor call
- required: true
- default: none
- receive_disable:
- description:
- - The receive disable string for the monitor call
- required: true
- default: none
- ip:
- description:
- - IP address part of the ipport definition. The default API setting
- is "0.0.0.0".
- required: false
- default: none
- port:
- description:
- - port address part op the ipport definition. The default API
- setting is 0.
- required: false
- default: none
- interval:
- description:
- - The interval specifying how frequently the monitor instance
- of this template will run. By default, this interval is used for up and
- down states. The default API setting is 5.
- required: false
- default: none
- timeout:
- description:
- - The number of seconds in which the node or service must respond to
- the monitor request. If the target responds within the set time
- period, it is considered up. If the target does not respond within
- the set time period, it is considered down. You can change this
- number to any number you want, however, it should be 3 times the
- interval number of seconds plus 1 second. The default API setting
- is 16.
- required: false
- default: none
- time_until_up:
- description:
- - Specifies the amount of time in seconds after the first successful
- response before a node will be marked up. A value of 0 will cause a
- node to be marked up immediately after a valid response is received
- from the node. The default API setting is 0.
- required: false
- default: none
+ state:
+ description:
+ - Monitor state
+ required: false
+ default: 'present'
+ choices:
+ - present
+ - absent
+ name:
+ description:
+ - Monitor name
+ required: true
+ default: null
+ aliases:
+ - monitor
+ partition:
+ description:
+ - Partition for the monitor
+ required: false
+ default: 'Common'
+ parent:
+ description:
+ - The parent template of this monitor template
+ required: false
+ default: 'http'
+ parent_partition:
+ description:
+ - Partition for the parent monitor
+ required: false
+ default: 'Common'
+ send:
+ description:
+ - The send string for the monitor call
+ required: true
+ default: none
+ receive:
+ description:
+ - The receive string for the monitor call
+ required: true
+ default: none
+ receive_disable:
+ description:
+ - The receive disable string for the monitor call
+ required: true
+ default: none
+ ip:
+ description:
+ - IP address part of the ipport definition. The default API setting
+ is "0.0.0.0".
+ required: false
+ default: none
+ port:
+ description:
+ - Port address part of the ip/port definition. The default API
+ setting is 0.
+ required: false
+ default: none
+ interval:
+ description:
+ - The interval specifying how frequently the monitor instance
+ of this template will run. By default, this interval is used for up and
+ down states. The default API setting is 5.
+ required: false
+ default: none
+ timeout:
+ description:
+ - The number of seconds in which the node or service must respond to
+ the monitor request. If the target responds within the set time
+ period, it is considered up. If the target does not respond within
+        the set time period, it is considered down. You can change this
+        to any value you want; however, it should be 3 times the
+        interval number of seconds plus 1 second. The default API setting
+ is 16.
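+        For example, with the default interval of 5 seconds that rule
+        gives (3 * 5) + 1 = 16.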
+ required: false
+ default: none
+ time_until_up:
+ description:
+ - Specifies the amount of time in seconds after the first successful
+ response before a node will be marked up. A value of 0 will cause a
+ node to be marked up immediately after a valid response is received
+ from the node. The default API setting is 0.
+ required: false
+ default: none
+extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: BIGIP F5 | Create HTTP Monitor
- local_action:
- module: bigip_monitor_http
- state: present
- server: "{{ f5server }}"
- user: "{{ f5user }}"
- password: "{{ f5password }}"
- name: "{{ item.monitorname }}"
- send: "{{ item.send }}"
- receive: "{{ item.receive }}"
- with_items: f5monitors
+ bigip_monitor_http:
+ state: "present"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ name: "my_http_monitor"
+ send: "http string to send"
+ receive: "http string to receive"
+ delegate_to: localhost
+
- name: BIGIP F5 | Remove HTTP Monitor
- local_action:
- module: bigip_monitor_http
- state: absent
- server: "{{ f5server }}"
- user: "{{ f5user }}"
- password: "{{ f5password }}"
- name: "{{ monitorname }}"
+ bigip_monitor_http:
+ state: "absent"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ name: "my_http_monitor"
+ delegate_to: localhost
'''
TEMPLATE_TYPE = 'TTYPE_HTTP'
DEFAULT_PARENT_TYPE = 'http'
-
def check_monitor_exists(module, api, monitor, parent):
-
# hack to determine if monitor exists
result = False
try:
@@ -179,7 +164,7 @@ def check_monitor_exists(module, api, monitor, parent):
result = True
else:
module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent))
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
@@ -189,10 +174,15 @@ def check_monitor_exists(module, api, monitor, parent):
def create_monitor(api, monitor, template_attributes):
-
try:
- api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes])
- except bigsuds.OperationFailed, e:
+ api.LocalLB.Monitor.create_template(
+ templates=[{
+ 'template_name': monitor,
+ 'template_type': TEMPLATE_TYPE
+ }],
+ template_attributes=[template_attributes]
+ )
+ except bigsuds.OperationFailed as e:
if "already exists" in str(e):
return False
else:
@@ -202,10 +192,9 @@ def create_monitor(api, monitor, template_attributes):
def delete_monitor(api, monitor):
-
try:
api.LocalLB.Monitor.delete_template(template_names=[monitor])
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
# maybe it was deleted since we checked
if "was not found" in str(e):
return False
@@ -216,10 +205,12 @@ def delete_monitor(api, monitor):
def check_string_property(api, monitor, str_property):
-
try:
- return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0]
- except bigsuds.OperationFailed, e:
+ template_prop = api.LocalLB.Monitor.get_template_string_property(
+ [monitor], [str_property['type']]
+ )[0]
+ return str_property == template_prop
+ except bigsuds.OperationFailed as e:
# happens in check mode if not created yet
if "was not found" in str(e):
return True
@@ -229,15 +220,19 @@ def check_string_property(api, monitor, str_property):
def set_string_property(api, monitor, str_property):
-
- api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property])
+ api.LocalLB.Monitor.set_template_string_property(
+ template_names=[monitor],
+ values=[str_property]
+ )
def check_integer_property(api, monitor, int_property):
-
try:
- return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0]
- except bigsuds.OperationFailed, e:
+ template_prop = api.LocalLB.Monitor.get_template_integer_property(
+ [monitor], [int_property['type']]
+ )[0]
+ return int_property == template_prop
+ except bigsuds.OperationFailed as e:
# happens in check mode if not created yet
if "was not found" in str(e):
return True
@@ -246,10 +241,11 @@ def check_integer_property(api, monitor, int_property):
raise
-
def set_integer_property(api, monitor, int_property):
-
- api.LocalLB.Monitor.set_template_int_property(template_names=[monitor], values=[int_property])
+ api.LocalLB.Monitor.set_template_integer_property(
+ template_names=[monitor],
+ values=[int_property]
+ )
def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
@@ -269,54 +265,53 @@ def update_monitor_properties(api, module, monitor, template_string_properties,
def get_ipport(api, monitor):
-
return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0]
def set_ipport(api, monitor, ipport):
-
try:
- api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport])
+ api.LocalLB.Monitor.set_template_destination(
+ template_names=[monitor], destinations=[ipport]
+ )
return True, ""
-
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "Cannot modify the address type of monitor" in str(e):
return False, "Cannot modify the address type of monitor if already assigned to a pool."
else:
# genuine exception
raise
-# ===========================================
-# main loop
-#
-# writing a module for other monitor types should
-# only need an updated main() (and monitor specific functions)
def main():
-
- # begin monitor specific stuff
- argument_spec=f5_argument_spec();
- argument_spec.update( dict(
- name = dict(required=True),
- parent = dict(default=DEFAULT_PARENT_TYPE),
- parent_partition = dict(default='Common'),
- send = dict(required=False),
- receive = dict(required=False),
- receive_disable = dict(required=False),
- ip = dict(required=False),
- port = dict(required=False, type='int'),
- interval = dict(required=False, type='int'),
- timeout = dict(required=False, type='int'),
- time_until_up = dict(required=False, type='int', default=0)
- )
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ name=dict(required=True),
+ parent=dict(default=DEFAULT_PARENT_TYPE),
+ parent_partition=dict(default='Common'),
+ send=dict(required=False),
+ receive=dict(required=False),
+ receive_disable=dict(required=False),
+ ip=dict(required=False),
+ port=dict(required=False, type='int'),
+ interval=dict(required=False, type='int'),
+ timeout=dict(required=False, type='int'),
+ time_until_up=dict(required=False, type='int', default=0)
)
+ argument_spec.update(meta_args)
module = AnsibleModule(
- argument_spec = argument_spec,
+ argument_spec=argument_spec,
supports_check_mode=True
)
- (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ partition = module.params['partition']
+ validate_certs = module.params['validate_certs']
parent_partition = module.params['parent_partition']
name = module.params['name']
@@ -333,18 +328,17 @@ def main():
# end monitor specific stuff
- api = bigip_api(server, user, password)
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
monitor_exists = check_monitor_exists(module, api, monitor, parent)
-
# ipport is a special setting
- if monitor_exists: # make sure to not update current settings if not asked
+ if monitor_exists:
cur_ipport = get_ipport(api, monitor)
if ip is None:
ip = cur_ipport['ipport']['address']
if port is None:
port = cur_ipport['ipport']['port']
- else: # use API defaults if not defined to create it
+ else:
if interval is None:
interval = 5
if timeout is None:
@@ -389,19 +383,26 @@ def main():
{'type': 'STYPE_RECEIVE_DRAIN',
'value': receive_disable}]
- template_integer_properties = [{'type': 'ITYPE_INTERVAL',
- 'value': interval},
- {'type': 'ITYPE_TIMEOUT',
- 'value': timeout},
- {'type': 'ITYPE_TIME_UNTIL_UP',
- 'value': time_until_up}]
+ template_integer_properties = [
+ {
+ 'type': 'ITYPE_INTERVAL',
+ 'value': interval
+ },
+ {
+ 'type': 'ITYPE_TIMEOUT',
+ 'value': timeout
+ },
+ {
+ 'type': 'ITYPE_TIME_UNTIL_UP',
+ 'value': time_until_up
+ }
+ ]
# main logic, monitor generic
try:
result = {'changed': False} # default
-
if state == 'absent':
if monitor_exists:
if not module.check_mode:
@@ -410,10 +411,9 @@ def main():
result['changed'] |= delete_monitor(api, monitor)
else:
result['changed'] |= True
-
- else: # state present
- ## check for monitor itself
- if not monitor_exists: # create it
+ else:
+ # check for monitor itself
+ if not monitor_exists:
if not module.check_mode:
# again, check changed status here b/c race conditions
# if other task already created it
@@ -421,22 +421,20 @@ def main():
else:
result['changed'] |= True
- ## check for monitor parameters
+ # check for monitor parameters
# whether it already existed, or was just created, now update
# the update functions need to check for check mode but
# cannot update settings if it doesn't exist which happens in check mode
result['changed'] |= update_monitor_properties(api, module, monitor,
- template_string_properties,
- template_integer_properties)
+ template_string_properties,
+ template_integer_properties)
# we just have to update the ipport if monitor already exists and it's different
if monitor_exists and cur_ipport != ipport:
set_ipport(api, monitor, ipport)
result['changed'] |= True
- #else: monitor doesn't exist (check mode) or ipport is already ok
-
-
- except Exception, e:
+ # else: monitor doesn't exist (check mode) or ipport is already ok
+ except Exception as e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
@@ -444,5 +442,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_monitor_tcp.py b/network/f5/bigip_monitor_tcp.py
index 0900e95fd20..aedc71f642b 100644
--- a/network/f5/bigip_monitor_tcp.py
+++ b/network/f5/bigip_monitor_tcp.py
@@ -18,167 +18,154 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: bigip_monitor_tcp
short_description: "Manages F5 BIG-IP LTM tcp monitors"
description:
- - "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API"
+ - "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API"
version_added: "1.4"
-author: "Serge van Ginderachter (@srvg)"
+author:
+ - Serge van Ginderachter (@srvg)
+ - Tim Rupp (@caphrim007)
notes:
- - "Requires BIG-IP software version >= 11"
- - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- - "Best run as a local_action in your playbook"
- - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
+ - "Requires BIG-IP software version >= 11"
+ - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+ - "Best run as a local_action in your playbook"
+ - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
requirements:
- - bigsuds
+ - bigsuds
options:
- server:
- description:
- - BIG-IP host
- required: true
- default: null
- user:
- description:
- - BIG-IP username
- required: true
- default: null
- password:
- description:
- - BIG-IP password
- required: true
- default: null
- validate_certs:
- description:
- - If C(no), SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
- required: false
- default: 'yes'
- choices: ['yes', 'no']
- version_added: 2.0
- state:
- description:
- - Monitor state
- required: false
- default: 'present'
- choices: ['present', 'absent']
- name:
- description:
- - Monitor name
- required: true
- default: null
- aliases: ['monitor']
- partition:
- description:
- - Partition for the monitor
- required: false
- default: 'Common'
- type:
- description:
- - The template type of this monitor template
- required: false
- default: 'tcp'
- choices: [ 'TTYPE_TCP', 'TTYPE_TCP_ECHO', 'TTYPE_TCP_HALF_OPEN']
- parent:
- description:
- - The parent template of this monitor template
- required: false
- default: 'tcp'
- choices: [ 'tcp', 'tcp_echo', 'tcp_half_open']
- parent_partition:
- description:
- - Partition for the parent monitor
- required: false
- default: 'Common'
- send:
- description:
- - The send string for the monitor call
- required: true
- default: none
- receive:
- description:
- - The receive string for the monitor call
- required: true
- default: none
- ip:
- description:
- - IP address part of the ipport definition. The default API setting
- is "0.0.0.0".
- required: false
- default: none
- port:
- description:
- - port address part op the ipport definition. The default API
- setting is 0.
- required: false
- default: none
- interval:
- description:
- - The interval specifying how frequently the monitor instance
- of this template will run. By default, this interval is used for up and
- down states. The default API setting is 5.
- required: false
- default: none
- timeout:
- description:
- - The number of seconds in which the node or service must respond to
- the monitor request. If the target responds within the set time
- period, it is considered up. If the target does not respond within
- the set time period, it is considered down. You can change this
- number to any number you want, however, it should be 3 times the
- interval number of seconds plus 1 second. The default API setting
- is 16.
- required: false
- default: none
- time_until_up:
- description:
- - Specifies the amount of time in seconds after the first successful
- response before a node will be marked up. A value of 0 will cause a
- node to be marked up immediately after a valid response is received
- from the node. The default API setting is 0.
- required: false
- default: none
+ state:
+ description:
+ - Monitor state
+ required: false
+ default: 'present'
+ choices:
+ - present
+ - absent
+ name:
+ description:
+ - Monitor name
+ required: true
+ default: null
+ aliases:
+ - monitor
+ partition:
+ description:
+ - Partition for the monitor
+ required: false
+ default: 'Common'
+ type:
+ description:
+ - The template type of this monitor template
+ required: false
+ default: 'tcp'
+ choices:
+ - TTYPE_TCP
+ - TTYPE_TCP_ECHO
+ - TTYPE_TCP_HALF_OPEN
+ parent:
+ description:
+ - The parent template of this monitor template
+ required: false
+ default: 'tcp'
+ choices:
+ - tcp
+ - tcp_echo
+ - tcp_half_open
+ parent_partition:
+ description:
+ - Partition for the parent monitor
+ required: false
+ default: 'Common'
+ send:
+ description:
+ - The send string for the monitor call
+ required: true
+ default: none
+ receive:
+ description:
+ - The receive string for the monitor call
+ required: true
+ default: none
+ ip:
+ description:
+ - IP address part of the ipport definition. The default API setting
+ is "0.0.0.0".
+ required: false
+ default: none
+ port:
+ description:
+ - Port address part op the ipport definition. The default API
+ setting is 0.
+ required: false
+ default: none
+ interval:
+ description:
+ - The interval specifying how frequently the monitor instance
+ of this template will run. By default, this interval is used for up and
+ down states. The default API setting is 5.
+ required: false
+ default: none
+ timeout:
+ description:
+ - The number of seconds in which the node or service must respond to
+ the monitor request. If the target responds within the set time
+ period, it is considered up. If the target does not respond within
+ the set time period, it is considered down. You can change this
+ number to any number you want, however, it should be 3 times the
+ interval number of seconds plus 1 second. The default API setting
+ is 16.
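+        For example, with the default interval of 5 seconds that rule
+        gives (3 * 5) + 1 = 16.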
+ required: false
+ default: none
+ time_until_up:
+ description:
+ - Specifies the amount of time in seconds after the first successful
+ response before a node will be marked up. A value of 0 will cause a
+ node to be marked up immediately after a valid response is received
+ from the node. The default API setting is 0.
+ required: false
+ default: none
+extends_documentation_fragment: f5
'''
EXAMPLES = '''
-
-- name: BIGIP F5 | Create TCP Monitor
- local_action:
- module: bigip_monitor_tcp
- state: present
- server: "{{ f5server }}"
- user: "{{ f5user }}"
- password: "{{ f5password }}"
- name: "{{ item.monitorname }}"
- type: tcp
- send: "{{ item.send }}"
- receive: "{{ item.receive }}"
- with_items: f5monitors-tcp
-- name: BIGIP F5 | Create TCP half open Monitor
- local_action:
- module: bigip_monitor_tcp
- state: present
- server: "{{ f5server }}"
- user: "{{ f5user }}"
- password: "{{ f5password }}"
- name: "{{ item.monitorname }}"
- type: tcp
- send: "{{ item.send }}"
- receive: "{{ item.receive }}"
- with_items: f5monitors-halftcp
-- name: BIGIP F5 | Remove TCP Monitor
- local_action:
- module: bigip_monitor_tcp
- state: absent
- server: "{{ f5server }}"
- user: "{{ f5user }}"
- password: "{{ f5password }}"
- name: "{{ monitorname }}"
- with_flattened:
- - f5monitors-tcp
- - f5monitors-halftcp
-
+- name: Create TCP Monitor
+ bigip_monitor_tcp:
+ state: "present"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ name: "my_tcp_monitor"
+ type: "tcp"
+ send: "tcp string to send"
+ receive: "tcp string to receive"
+ delegate_to: localhost
+
+# Half-open monitors only probe the TCP handshake, so no send/receive
+# strings are set here.
+- name: Create TCP half open Monitor
+  bigip_monitor_tcp:
+    state: "present"
+    server: "lb.mydomain.com"
+    user: "admin"
+    password: "secret"
+    name: "my_tcp_half_open_monitor"
+    type: "tcp_half_open"
+  delegate_to: localhost
+
+- name: Remove TCP Monitor
+ bigip_monitor_tcp:
+ state: "absent"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ name: "my_tcp_monitor"
'''
TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP'
@@ -187,7 +174,6 @@
def check_monitor_exists(module, api, monitor, parent):
-
# hack to determine if monitor exists
result = False
try:
@@ -197,7 +183,7 @@ def check_monitor_exists(module, api, monitor, parent):
result = True
else:
module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent))
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
@@ -207,10 +193,15 @@ def check_monitor_exists(module, api, monitor, parent):
def create_monitor(api, monitor, template_attributes):
-
try:
- api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes])
- except bigsuds.OperationFailed, e:
+ api.LocalLB.Monitor.create_template(
+ templates=[{
+ 'template_name': monitor,
+ 'template_type': TEMPLATE_TYPE
+ }],
+ template_attributes=[template_attributes]
+ )
+ except bigsuds.OperationFailed as e:
if "already exists" in str(e):
return False
else:
@@ -220,10 +211,9 @@ def create_monitor(api, monitor, template_attributes):
def delete_monitor(api, monitor):
-
try:
api.LocalLB.Monitor.delete_template(template_names=[monitor])
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
# maybe it was deleted since we checked
if "was not found" in str(e):
return False
@@ -234,41 +224,46 @@ def delete_monitor(api, monitor):
def check_string_property(api, monitor, str_property):
-
try:
- return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0]
- except bigsuds.OperationFailed, e:
+ template_prop = api.LocalLB.Monitor.get_template_string_property(
+ [monitor], [str_property['type']]
+ )[0]
+ return str_property == template_prop
+ except bigsuds.OperationFailed as e:
# happens in check mode if not created yet
if "was not found" in str(e):
return True
else:
# genuine exception
raise
- return True
def set_string_property(api, monitor, str_property):
-
- api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property])
+ api.LocalLB.Monitor.set_template_string_property(
+ template_names=[monitor],
+ values=[str_property]
+ )
def check_integer_property(api, monitor, int_property):
-
try:
- return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0]
- except bigsuds.OperationFailed, e:
+ return int_property == api.LocalLB.Monitor.get_template_integer_property(
+ [monitor], [int_property['type']]
+ )[0]
+ except bigsuds.OperationFailed as e:
# happens in check mode if not created yet
if "was not found" in str(e):
return True
else:
# genuine exception
raise
- return True
def set_integer_property(api, monitor, int_property):
-
- api.LocalLB.Monitor.set_template_int_property(template_names=[monitor], values=[int_property])
+ api.LocalLB.Monitor.set_template_integer_property(
+ template_names=[monitor],
+ values=[int_property]
+ )
def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
@@ -278,6 +273,7 @@ def update_monitor_properties(api, module, monitor, template_string_properties,
if not module.check_mode:
set_string_property(api, monitor, str_property)
changed = True
+
for int_property in template_integer_properties:
if int_property['value'] is not None and not check_integer_property(api, monitor, int_property):
if not module.check_mode:
@@ -288,54 +284,59 @@ def update_monitor_properties(api, module, monitor, template_string_properties,
def get_ipport(api, monitor):
-
return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0]
def set_ipport(api, monitor, ipport):
-
try:
- api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport])
+ api.LocalLB.Monitor.set_template_destination(
+ template_names=[monitor], destinations=[ipport]
+ )
return True, ""
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "Cannot modify the address type of monitor" in str(e):
return False, "Cannot modify the address type of monitor if already assigned to a pool."
else:
# genuine exception
raise
-# ===========================================
-# main loop
-#
-# writing a module for other monitor types should
-# only need an updated main() (and monitor specific functions)
def main():
-
- # begin monitor specific stuff
- argument_spec=f5_argument_spec();
- argument_spec.update(dict(
- name = dict(required=True),
- type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES),
- parent = dict(default=DEFAULT_PARENT),
- parent_partition = dict(default='Common'),
- send = dict(required=False),
- receive = dict(required=False),
- ip = dict(required=False),
- port = dict(required=False, type='int'),
- interval = dict(required=False, type='int'),
- timeout = dict(required=False, type='int'),
- time_until_up = dict(required=False, type='int', default=0)
- )
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ name=dict(required=True),
+ type=dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES),
+ parent=dict(default=DEFAULT_PARENT),
+ parent_partition=dict(default='Common'),
+ send=dict(required=False),
+ receive=dict(required=False),
+ ip=dict(required=False),
+ port=dict(required=False, type='int'),
+ interval=dict(required=False, type='int'),
+ timeout=dict(required=False, type='int'),
+ time_until_up=dict(required=False, type='int', default=0)
)
+ argument_spec.update(meta_args)
module = AnsibleModule(
- argument_spec = argument_spec,
+ argument_spec=argument_spec,
supports_check_mode=True
)
- (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
+ if module.params['validate_certs']:
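+        # ssl.SSLContext (needed for certificate verification) first
+        # appeared in python 2.7.9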
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ partition = module.params['partition']
+ validate_certs = module.params['validate_certs']
parent_partition = module.params['parent_partition']
name = module.params['name']
@@ -356,29 +357,30 @@ def main():
# end monitor specific stuff
- api = bigip_api(server, user, password)
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
monitor_exists = check_monitor_exists(module, api, monitor, parent)
-
# ipport is a special setting
- if monitor_exists: # make sure to not update current settings if not asked
+ if monitor_exists:
+ # make sure to not update current settings if not asked
cur_ipport = get_ipport(api, monitor)
if ip is None:
ip = cur_ipport['ipport']['address']
if port is None:
port = cur_ipport['ipport']['port']
- else: # use API defaults if not defined to create it
- if interval is None:
+ else:
+ # use API defaults if not defined to create it
+ if interval is None:
interval = 5
- if timeout is None:
+ if timeout is None:
timeout = 16
- if ip is None:
+ if ip is None:
ip = '0.0.0.0'
- if port is None:
+ if port is None:
port = 0
- if send is None:
+ if send is None:
send = ''
- if receive is None:
+ if receive is None:
receive = ''
# define and set address type
@@ -391,76 +393,90 @@ def main():
else:
address_type = 'ATYPE_UNSET'
- ipport = {'address_type': address_type,
- 'ipport': {'address': ip,
- 'port': port}}
-
- template_attributes = {'parent_template': parent,
- 'interval': interval,
- 'timeout': timeout,
- 'dest_ipport': ipport,
- 'is_read_only': False,
- 'is_directly_usable': True}
+ ipport = {
+ 'address_type': address_type,
+ 'ipport': {
+ 'address': ip,
+ 'port': port
+ }
+ }
+
+ template_attributes = {
+ 'parent_template': parent,
+ 'interval': interval,
+ 'timeout': timeout,
+ 'dest_ipport': ipport,
+ 'is_read_only': False,
+ 'is_directly_usable': True
+ }
# monitor specific stuff
if type == 'TTYPE_TCP':
- template_string_properties = [{'type': 'STYPE_SEND',
- 'value': send},
- {'type': 'STYPE_RECEIVE',
- 'value': receive}]
+ template_string_properties = [
+ {
+ 'type': 'STYPE_SEND',
+ 'value': send
+ },
+ {
+ 'type': 'STYPE_RECEIVE',
+ 'value': receive
+ }
+ ]
else:
template_string_properties = []
- template_integer_properties = [{'type': 'ITYPE_INTERVAL',
- 'value': interval},
- {'type': 'ITYPE_TIMEOUT',
- 'value': timeout},
- {'type': 'ITYPE_TIME_UNTIL_UP',
- 'value': interval}]
+ template_integer_properties = [
+ {
+ 'type': 'ITYPE_INTERVAL',
+ 'value': interval
+ },
+ {
+ 'type': 'ITYPE_TIMEOUT',
+ 'value': timeout
+ },
+ {
+ 'type': 'ITYPE_TIME_UNTIL_UP',
+ 'value': time_until_up
+ }
+ ]
# main logic, monitor generic
try:
result = {'changed': False} # default
-
if state == 'absent':
if monitor_exists:
if not module.check_mode:
- # possible race condition if same task
+ # possible race condition if same task
# on other node deleted it first
result['changed'] |= delete_monitor(api, monitor)
else:
result['changed'] |= True
-
- else: # state present
- ## check for monitor itself
- if not monitor_exists: # create it
- if not module.check_mode:
+ else:
+ # check for monitor itself
+ if not monitor_exists:
+ if not module.check_mode:
# again, check changed status here b/c race conditions
# if other task already created it
result['changed'] |= create_monitor(api, monitor, template_attributes)
- else:
+ else:
result['changed'] |= True
- ## check for monitor parameters
+ # check for monitor parameters
# whether it already existed, or was just created, now update
# the update functions need to check for check mode but
# cannot update settings if it doesn't exist which happens in check mode
- if monitor_exists and not module.check_mode:
- result['changed'] |= update_monitor_properties(api, module, monitor,
- template_string_properties,
- template_integer_properties)
- # else assume nothing changed
+ result['changed'] |= update_monitor_properties(api, module, monitor,
+ template_string_properties,
+ template_integer_properties)
# we just have to update the ipport if monitor already exists and it's different
if monitor_exists and cur_ipport != ipport:
- set_ipport(api, monitor, ipport)
+ set_ipport(api, monitor, ipport)
result['changed'] |= True
- #else: monitor doesn't exist (check mode) or ipport is already ok
-
-
- except Exception, e:
+ # else: monitor doesn't exist (check mode) or ipport is already ok
+ except Exception as e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
@@ -468,5 +484,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py
index 28eacc0d6f5..08107f6e2ce 100644
--- a/network/f5/bigip_node.py
+++ b/network/f5/bigip_node.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-
+#
# (c) 2013, Matt Hite
#
# This file is part of Ansible
@@ -18,120 +18,113 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: bigip_node
short_description: "Manages F5 BIG-IP LTM nodes"
description:
- - "Manages F5 BIG-IP LTM nodes via iControl SOAP API"
+ - "Manages F5 BIG-IP LTM nodes via iControl SOAP API"
version_added: "1.4"
-author: "Matt Hite (@mhite)"
+author:
+ - Matt Hite (@mhite)
+ - Tim Rupp (@caphrim007)
notes:
- - "Requires BIG-IP software version >= 11"
- - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- - "Best run as a local_action in your playbook"
+ - "Requires BIG-IP software version >= 11"
+ - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+ - "Best run as a local_action in your playbook"
requirements:
- - bigsuds
+ - bigsuds
options:
- server:
- description:
- - BIG-IP host
- required: true
- default: null
- choices: []
- aliases: []
- user:
- description:
- - BIG-IP username
- required: true
- default: null
- choices: []
- aliases: []
- password:
- description:
- - BIG-IP password
- required: true
- default: null
- choices: []
- aliases: []
- validate_certs:
- description:
- - If C(no), SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
- required: false
- default: 'yes'
- choices: ['yes', 'no']
- version_added: 2.0
- state:
- description:
- - Pool member state
- required: true
- default: present
- choices: ['present', 'absent']
- aliases: []
- session_state:
- description:
- - Set new session availability status for node
- version_added: "1.9"
- required: false
- default: null
- choices: ['enabled', 'disabled']
- aliases: []
- monitor_state:
- description:
- - Set monitor availability status for node
- version_added: "1.9"
- required: false
- default: null
- choices: ['enabled', 'disabled']
- aliases: []
- partition:
- description:
- - Partition
- required: false
- default: 'Common'
- choices: []
- aliases: []
- name:
- description:
- - "Node name"
- required: false
- default: null
- choices: []
- host:
- description:
- - "Node IP. Required when state=present and node does not exist. Error when state=absent."
- required: true
- default: null
- choices: []
- aliases: ['address', 'ip']
+ state:
+ description:
+ - Pool member state
+ required: true
+ default: present
+ choices: ['present', 'absent']
+ aliases: []
+ session_state:
+ description:
+ - Set new session availability status for node
+ version_added: "1.9"
+ required: false
+ default: null
+ choices: ['enabled', 'disabled']
+ aliases: []
+ monitor_state:
+ description:
+ - Set monitor availability status for node
+ version_added: "1.9"
+ required: false
+ default: null
+ choices: ['enabled', 'disabled']
+ aliases: []
+ partition:
+ description:
+ - Partition
+ required: false
+ default: 'Common'
+ choices: []
+ aliases: []
+ name:
+ description:
+ - "Node name"
+ required: false
+ default: null
+ choices: []
+ monitor_type:
+ description:
+ - Monitor rule type when monitors > 1
+ version_added: "2.2"
+ required: False
+ default: null
+ choices: ['and_list', 'm_of_n']
+ aliases: []
+ quorum:
+ description:
+ - Monitor quorum value when monitor_type is m_of_n
+ version_added: "2.2"
+ required: False
+ default: null
+ choices: []
+ aliases: []
+ monitors:
description:
- description:
- - "Node description."
- required: false
- default: null
- choices: []
+ - Monitor template name list. Always use the full path to the monitor.
+ version_added: "2.2"
+ required: False
+ default: null
+ choices: []
+ aliases: []
+ host:
+ description:
+ - "Node IP. Required when state=present and node does not exist. Error when state=absent."
+ required: false
+ default: null
+ choices: []
+ aliases: ['address', 'ip']
+ description:
+ description:
+ - "Node description."
+ required: false
+ default: null
+ choices: []
+extends_documentation_fragment: f5
'''
EXAMPLES = '''
-
-## playbook task examples:
-
----
-# file bigip-test.yml
-# ...
-- hosts: bigip-test
- tasks:
- - name: Add node
- local_action: >
- bigip_node
- server=lb.mydomain.com
- user=admin
- password=mysecret
- state=present
- partition=matthite
- host="{{ ansible_default_ipv4["address"] }}"
- name="{{ ansible_default_ipv4["address"] }}"
+- name: Add node
+ bigip_node:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ partition: "Common"
+ host: "10.20.30.40"
+ name: "10.20.30.40"
# Note that the BIG-IP automatically names the node using the
# IP address specified in previous play's host parameter.
@@ -140,26 +133,38 @@
# Alternatively, you could have specified a name with the
# name parameter when state=present.
- - name: Modify node description
- local_action: >
- bigip_node
- server=lb.mydomain.com
- user=admin
- password=mysecret
- state=present
- partition=matthite
- name="{{ ansible_default_ipv4["address"] }}"
- description="Our best server yet"
-
- - name: Delete node
- local_action: >
- bigip_node
- server=lb.mydomain.com
- user=admin
- password=mysecret
- state=absent
- partition=matthite
- name="{{ ansible_default_ipv4["address"] }}"
+- name: Add node with a single 'ping' monitor
+ bigip_node:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ partition: "Common"
+ host: "10.20.30.40"
+ name: "mytestserver"
+ monitors:
+ - /Common/icmp
+ delegate_to: localhost
+
+- name: Modify node description
+ bigip_node:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ partition: "Common"
+ name: "10.20.30.40"
+ description: "Our best server yet"
+ delegate_to: localhost
+
+- name: Delete node
+ bigip_node:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "absent"
+ partition: "Common"
+ name: "10.20.30.40"
# The BIG-IP GUI doesn't map directly to the API calls for "Node ->
# General Properties -> State". The following states map to API monitor
@@ -174,27 +179,26 @@
#
# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
- - name: Force node offline
- local_action: >
- bigip_node
- server=lb.mydomain.com
- user=admin
- password=mysecret
- state=present
- session_state=disabled
- monitor_state=disabled
- partition=matthite
- name="{{ ansible_default_ipv4["address"] }}"
-
+- name: Force node offline
+ bigip_node:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "mysecret"
+ state: "present"
+ session_state: "disabled"
+ monitor_state: "disabled"
+ partition: "Common"
+ name: "10.20.30.40"
'''
+
def node_exists(api, address):
# hack to determine if node exists
result = False
try:
api.LocalLB.NodeAddressV2.get_object_status(nodes=[address])
result = True
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
@@ -202,12 +206,17 @@ def node_exists(api, address):
raise
return result
+
def create_node_address(api, address, name):
try:
- api.LocalLB.NodeAddressV2.create(nodes=[name], addresses=[address], limits=[0])
+ api.LocalLB.NodeAddressV2.create(
+ nodes=[name],
+ addresses=[address],
+ limits=[0]
+ )
result = True
desc = ""
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "already exists" in str(e):
result = False
desc = "referenced name or IP already in use"
@@ -216,15 +225,17 @@ def create_node_address(api, address, name):
raise
return (result, desc)
+
def get_node_address(api, name):
return api.LocalLB.NodeAddressV2.get_address(nodes=[name])[0]
+
def delete_node_address(api, address):
try:
api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
result = True
desc = ""
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "is referenced by a member of pool" in str(e):
result = False
desc = "node referenced by pool"
@@ -233,51 +244,89 @@ def delete_node_address(api, address):
raise
return (result, desc)
+
def set_node_description(api, name, description):
api.LocalLB.NodeAddressV2.set_description(nodes=[name],
descriptions=[description])
+
def get_node_description(api, name):
return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0]
+
def set_node_session_enabled_state(api, name, session_state):
session_state = "STATE_%s" % session_state.strip().upper()
api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name],
states=[session_state])
+
def get_node_session_status(api, name):
result = api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0]
result = result.split("SESSION_STATUS_")[-1].lower()
return result
+
def set_node_monitor_state(api, name, monitor_state):
monitor_state = "STATE_%s" % monitor_state.strip().upper()
api.LocalLB.NodeAddressV2.set_monitor_state(nodes=[name],
states=[monitor_state])
+
def get_node_monitor_status(api, name):
result = api.LocalLB.NodeAddressV2.get_monitor_status(nodes=[name])[0]
result = result.split("MONITOR_STATUS_")[-1].lower()
return result
+def get_monitors(api, name):
+ result = api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=[name])[0]
+ monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower()
+ quorum = result['quorum']
+ monitor_templates = result['monitor_templates']
+ return (monitor_type, quorum, monitor_templates)
+
+
+def set_monitors(api, name, monitor_type, quorum, monitor_templates):
+ monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper()
+ monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates}
+ api.LocalLB.NodeAddressV2.set_monitor_rule(nodes=[name],
+ monitor_rules=[monitor_rule])
+
+
def main():
- argument_spec=f5_argument_spec();
- argument_spec.update(dict(
- session_state = dict(type='str', choices=['enabled', 'disabled']),
- monitor_state = dict(type='str', choices=['enabled', 'disabled']),
- name = dict(type='str', required=True),
- host = dict(type='str', aliases=['address', 'ip']),
- description = dict(type='str')
- )
+ monitor_type_choices = ['and_list', 'm_of_n']
+
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ session_state=dict(type='str', choices=['enabled', 'disabled']),
+ monitor_state=dict(type='str', choices=['enabled', 'disabled']),
+ name=dict(type='str', required=True),
+ host=dict(type='str', aliases=['address', 'ip']),
+ description=dict(type='str'),
+ monitor_type=dict(type='str', choices=monitor_type_choices),
+ quorum=dict(type='int'),
+ monitors=dict(type='list')
)
+ argument_spec.update(meta_args)
module = AnsibleModule(
- argument_spec = argument_spec,
+ argument_spec=argument_spec,
supports_check_mode=True
)
- (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
+ if module.params['validate_certs']:
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ partition = module.params['partition']
+ validate_certs = module.params['validate_certs']
session_state = module.params['session_state']
monitor_state = module.params['monitor_state']
@@ -285,12 +334,41 @@ def main():
name = module.params['name']
address = fq_name(partition, name)
description = module.params['description']
-
+ monitor_type = module.params['monitor_type']
+ if monitor_type:
+ monitor_type = monitor_type.lower()
+ quorum = module.params['quorum']
+ monitors = module.params['monitors']
+ if monitors:
+ monitors = []
+ for monitor in module.params['monitors']:
+ monitors.append(fq_name(partition, monitor))
+
+ # sanity check user supplied values
if state == 'absent' and host is not None:
module.fail_json(msg="host parameter invalid when state=absent")
+ if monitors:
+ if len(monitors) == 1:
+ # set default required values for single monitor
+ quorum = 0
+ monitor_type = 'single'
+ elif len(monitors) > 1:
+ if not monitor_type:
+ module.fail_json(msg="monitor_type required for monitors > 1")
+ if monitor_type == 'm_of_n' and not quorum:
+ module.fail_json(msg="quorum value required for monitor_type m_of_n")
+ if monitor_type != 'm_of_n':
+ quorum = 0
+ elif monitor_type:
+ # no monitors specified but monitor_type exists
+ module.fail_json(msg="monitor_type require monitors parameter")
+ elif quorum is not None:
+ # no monitors specified but quorum exists
+ module.fail_json(msg="quorum requires monitors parameter")
+
try:
- api = bigip_api(server, user, password)
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
result = {'changed': False} # default
if state == 'absent':
@@ -308,7 +386,7 @@ def main():
elif state == 'present':
if not node_exists(api, address):
if host is None:
- module.fail_json(msg="host parameter required when " \
+ module.fail_json(msg="host parameter required when "
"state=present and node does not exist")
if not module.check_mode:
created, desc = create_node_address(api, address=host, name=address)
@@ -326,6 +404,8 @@ def main():
if description is not None:
set_node_description(api, address, description)
result = {'changed': True}
+ if monitors:
+ set_monitors(api, address, monitor_type, quorum, monitors)
else:
# check-mode return value
result = {'changed': True}
@@ -333,8 +413,8 @@ def main():
# node exists -- potentially modify attributes
if host is not None:
if get_node_address(api, address) != host:
- module.fail_json(msg="Changing the node address is " \
- "not supported by the API; " \
+ module.fail_json(msg="Changing the node address is "
+ "not supported by the API; "
"delete and recreate the node.")
if session_state is not None:
session_status = get_node_session_status(api, address)
@@ -345,7 +425,7 @@ def main():
session_state)
result = {'changed': True}
elif session_state == 'disabled' and \
- session_status != 'force_disabled':
+ session_status != 'force_disabled':
if not module.check_mode:
set_node_session_enabled_state(api, address,
session_state)
@@ -359,7 +439,7 @@ def main():
monitor_state)
result = {'changed': True}
elif monitor_state == 'disabled' and \
- monitor_status != 'forced_down':
+ monitor_status != 'forced_down':
if not module.check_mode:
set_node_monitor_state(api, address,
monitor_state)
@@ -369,14 +449,19 @@ def main():
if not module.check_mode:
set_node_description(api, address, description)
result = {'changed': True}
-
- except Exception, e:
+ if monitors:
+ t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, address)
+ if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)):
+ if not module.check_mode:
+ set_monitors(api, address, monitor_type, quorum, monitors)
+ result = {'changed': True}
+ except Exception as e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
-# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
-main()
+if __name__ == '__main__':
+ main()
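
# Editor's note: the new monitor parameters on bigip_node reuse the sanity
# checks bigip_pool already applies: one monitor forces rule type 'single' with
# quorum 0, multiple monitors require monitor_type (and a quorum for m_of_n),
# and monitor_type or quorum supplied without monitors is rejected. A standalone
# sketch of those rules, for reference only (raises instead of fail_json):
#
#     def validate_monitor_args(monitors, monitor_type, quorum):
#         if monitors:
#             if len(monitors) == 1:
#                 return 'single', 0   # defaults for a single monitor
#             if not monitor_type:
#                 raise ValueError("monitor_type required for monitors > 1")
#             if monitor_type == 'm_of_n' and not quorum:
#                 raise ValueError("quorum value required for monitor_type m_of_n")
#             return monitor_type, (quorum if monitor_type == 'm_of_n' else 0)
#         if monitor_type:
#             raise ValueError("monitor_type requires monitors parameter")
#         if quorum is not None:
#             raise ValueError("quorum requires monitors parameter")
#         return None, None
#
#     # e.g. validate_monitor_args(['/Common/icmp'], None, None) -> ('single', 0)
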
diff --git a/network/f5/bigip_pool.py b/network/f5/bigip_pool.py
index 1628f6c68c9..eb6b8f3adaa 100644
--- a/network/f5/bigip_pool.py
+++ b/network/f5/bigip_pool.py
@@ -18,223 +18,217 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: bigip_pool
short_description: "Manages F5 BIG-IP LTM pools"
description:
- - "Manages F5 BIG-IP LTM pools via iControl SOAP API"
-version_added: "1.2"
-author: "Matt Hite (@mhite)"
+ - Manages F5 BIG-IP LTM pools via iControl SOAP API
+version_added: 1.2
+author:
+ - Matt Hite (@mhite)
+ - Tim Rupp (@caphrim007)
notes:
- - "Requires BIG-IP software version >= 11"
- - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- - "Best run as a local_action in your playbook"
+ - Requires BIG-IP software version >= 11
+ - F5 developed module 'bigsuds' required (see http://devcentral.f5.com)
+ - Best run as a local_action in your playbook
requirements:
- - bigsuds
+ - bigsuds
options:
- server:
- description:
- - BIG-IP host
- required: true
- default: null
- choices: []
- aliases: []
- user:
- description:
- - BIG-IP username
- required: true
- default: null
- choices: []
- aliases: []
- password:
- description:
- - BIG-IP password
- required: true
- default: null
- choices: []
- aliases: []
- validate_certs:
- description:
- - If C(no), SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
- required: false
- default: 'yes'
- choices: ['yes', 'no']
- version_added: 2.0
- state:
- description:
- - Pool/pool member state
- required: false
- default: present
- choices: ['present', 'absent']
- aliases: []
- name:
- description:
- - Pool name
- required: true
- default: null
- choices: []
- aliases: ['pool']
- partition:
- description:
- - Partition of pool/pool member
- required: false
- default: 'Common'
- choices: []
- aliases: []
- lb_method:
- description:
- - Load balancing method
- version_added: "1.3"
- required: False
- default: 'round_robin'
- choices: ['round_robin', 'ratio_member', 'least_connection_member',
- 'observed_member', 'predictive_member', 'ratio_node_address',
- 'least_connection_node_address', 'fastest_node_address',
- 'observed_node_address', 'predictive_node_address',
- 'dynamic_ratio', 'fastest_app_response', 'least_sessions',
- 'dynamic_ratio_member', 'l3_addr', 'unknown',
- 'weighted_least_connection_member',
- 'weighted_least_connection_node_address',
- 'ratio_session', 'ratio_least_connection_member',
- 'ratio_least_connection_node_address']
- aliases: []
- monitor_type:
- description:
- - Monitor rule type when monitors > 1
- version_added: "1.3"
- required: False
- default: null
- choices: ['and_list', 'm_of_n']
- aliases: []
- quorum:
- description:
- - Monitor quorum value when monitor_type is m_of_n
- version_added: "1.3"
- required: False
- default: null
- choices: []
- aliases: []
- monitors:
- description:
- - Monitor template name list. Always use the full path to the monitor.
- version_added: "1.3"
- required: False
- default: null
- choices: []
- aliases: []
- slow_ramp_time:
- description:
- - Sets the ramp-up time (in seconds) to gradually ramp up the load on newly added or freshly detected up pool members
- version_added: "1.3"
- required: False
- default: null
- choices: []
- aliases: []
- service_down_action:
- description:
- - Sets the action to take when node goes down in pool
- version_added: "1.3"
- required: False
- default: null
- choices: ['none', 'reset', 'drop', 'reselect']
- aliases: []
- host:
- description:
- - "Pool member IP"
- required: False
- default: null
- choices: []
- aliases: ['address']
- port:
- description:
- - "Pool member port"
- required: False
- default: null
- choices: []
- aliases: []
+ state:
+ description:
+ - Pool/pool member state
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+ aliases: []
+ name:
+ description:
+ - Pool name
+ required: true
+ default: null
+ choices: []
+ aliases:
+ - pool
+ partition:
+ description:
+ - Partition of pool/pool member
+ required: false
+ default: 'Common'
+ choices: []
+ aliases: []
+ lb_method:
+ description:
+ - Load balancing method
+ version_added: "1.3"
+ required: False
+ default: 'round_robin'
+ choices:
+ - round_robin
+ - ratio_member
+ - least_connection_member
+ - observed_member
+ - predictive_member
+ - ratio_node_address
+ - least_connection_node_address
+ - fastest_node_address
+ - observed_node_address
+ - predictive_node_address
+ - dynamic_ratio
+ - fastest_app_response
+ - least_sessions
+ - dynamic_ratio_member
+ - l3_addr
+ - weighted_least_connection_member
+ - weighted_least_connection_node_address
+ - ratio_session
+ - ratio_least_connection_member
+ - ratio_least_connection_node_address
+ aliases: []
+ monitor_type:
+ description:
+ - Monitor rule type when monitors > 1
+ version_added: "1.3"
+ required: False
+ default: null
+ choices: ['and_list', 'm_of_n']
+ aliases: []
+ quorum:
+ description:
+ - Monitor quorum value when monitor_type is m_of_n
+ version_added: "1.3"
+ required: False
+ default: null
+ choices: []
+ aliases: []
+ monitors:
+ description:
+ - Monitor template name list. Always use the full path to the monitor.
+ version_added: "1.3"
+ required: False
+ default: null
+ choices: []
+ aliases: []
+ slow_ramp_time:
+ description:
+ - Sets the ramp-up time (in seconds) to gradually ramp up the load on
+ newly added or freshly detected up pool members
+ version_added: "1.3"
+ required: False
+ default: null
+ choices: []
+ aliases: []
+ reselect_tries:
+ description:
+ - Sets the number of times the system tries to contact a pool member
+ after a passive failure
+ version_added: "2.2"
+ required: False
+ default: null
+ choices: []
+ aliases: []
+ service_down_action:
+ description:
+ - Sets the action to take when node goes down in pool
+ version_added: "1.3"
+ required: False
+ default: null
+ choices:
+ - none
+ - reset
+ - drop
+ - reselect
+ aliases: []
+ host:
+ description:
+ - "Pool member IP"
+ required: False
+ default: null
+ choices: []
+ aliases:
+ - address
+ port:
+ description:
+ - Pool member port
+ required: False
+ default: null
+ choices: []
+ aliases: []
+extends_documentation_fragment: f5
'''
EXAMPLES = '''
+- name: Create pool
+ bigip_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ name: "my-pool"
+ partition: "Common"
+ lb_method: "least_connection_member"
+ slow_ramp_time: 120
+ delegate_to: localhost
+
+- name: Modify load balancer method
+ bigip_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ name: "my-pool"
+ partition: "Common"
+ lb_method: "round_robin"
+
+- name: Add pool member
+ bigip_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ name: "my-pool"
+ partition: "Common"
+ host: "{{ ansible_default_ipv4["address"] }}"
+ port: 80
+
+- name: Remove pool member from pool
+ bigip_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "absent"
+ name: "my-pool"
+ partition: "Common"
+ host: "{{ ansible_default_ipv4["address"] }}"
+ port: 80
+
+- name: Delete pool
+ bigip_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "absent"
+ name: "my-pool"
+ partition: "Common"
+'''
-## playbook task examples:
-
----
-# file bigip-test.yml
-# ...
-- hosts: localhost
- tasks:
- - name: Create pool
- local_action: >
- bigip_pool
- server=lb.mydomain.com
- user=admin
- password=mysecret
- state=present
- name=matthite-pool
- partition=matthite
- lb_method=least_connection_member
- slow_ramp_time=120
-
- - name: Modify load balancer method
- local_action: >
- bigip_pool
- server=lb.mydomain.com
- user=admin
- password=mysecret
- state=present
- name=matthite-pool
- partition=matthite
- lb_method=round_robin
-
-- hosts: bigip-test
- tasks:
- - name: Add pool member
- local_action: >
- bigip_pool
- server=lb.mydomain.com
- user=admin
- password=mysecret
- state=present
- name=matthite-pool
- partition=matthite
- host="{{ ansible_default_ipv4["address"] }}"
- port=80
-
- - name: Remove pool member from pool
- local_action: >
- bigip_pool
- server=lb.mydomain.com
- user=admin
- password=mysecret
- state=absent
- name=matthite-pool
- partition=matthite
- host="{{ ansible_default_ipv4["address"] }}"
- port=80
-
-- hosts: localhost
- tasks:
- - name: Delete pool
- local_action: >
- bigip_pool
- server=lb.mydomain.com
- user=admin
- password=mysecret
- state=absent
- name=matthite-pool
- partition=matthite
-
+RETURN = '''
'''
+
def pool_exists(api, pool):
# hack to determine if pool exists
result = False
try:
api.LocalLB.Pool.get_object_status(pool_names=[pool])
result = True
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
@@ -242,6 +236,7 @@ def pool_exists(api, pool):
raise
return result
+
def create_pool(api, pool, lb_method):
# create requires lb_method but we don't want to default
# to a value on subsequent runs
@@ -251,18 +246,22 @@ def create_pool(api, pool, lb_method):
api.LocalLB.Pool.create_v2(pool_names=[pool], lb_methods=[lb_method],
members=[[]])
+
def remove_pool(api, pool):
api.LocalLB.Pool.delete_pool(pool_names=[pool])
+
def get_lb_method(api, pool):
lb_method = api.LocalLB.Pool.get_lb_method(pool_names=[pool])[0]
lb_method = lb_method.strip().replace('LB_METHOD_', '').lower()
return lb_method
+
def set_lb_method(api, pool, lb_method):
lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
api.LocalLB.Pool.set_lb_method(pool_names=[pool], lb_methods=[lb_method])
+
def get_monitors(api, pool):
result = api.LocalLB.Pool.get_monitor_association(pool_names=[pool])[0]['monitor_rule']
monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower()
@@ -270,28 +269,43 @@ def get_monitors(api, pool):
monitor_templates = result['monitor_templates']
return (monitor_type, quorum, monitor_templates)
+
def set_monitors(api, pool, monitor_type, quorum, monitor_templates):
monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper()
monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates}
monitor_association = {'pool_name': pool, 'monitor_rule': monitor_rule}
api.LocalLB.Pool.set_monitor_association(monitor_associations=[monitor_association])
+
def get_slow_ramp_time(api, pool):
result = api.LocalLB.Pool.get_slow_ramp_time(pool_names=[pool])[0]
return result
+
def set_slow_ramp_time(api, pool, seconds):
api.LocalLB.Pool.set_slow_ramp_time(pool_names=[pool], values=[seconds])
+
+def get_reselect_tries(api, pool):
+ result = api.LocalLB.Pool.get_reselect_tries(pool_names=[pool])[0]
+ return result
+
+
+def set_reselect_tries(api, pool, tries):
+ api.LocalLB.Pool.set_reselect_tries(pool_names=[pool], values=[tries])
+
+
def get_action_on_service_down(api, pool):
result = api.LocalLB.Pool.get_action_on_service_down(pool_names=[pool])[0]
result = result.split("SERVICE_DOWN_ACTION_")[-1].lower()
return result
+
def set_action_on_service_down(api, pool, action):
action = "SERVICE_DOWN_ACTION_%s" % action.strip().upper()
api.LocalLB.Pool.set_action_on_service_down(pool_names=[pool], actions=[action])
+
def member_exists(api, pool, address, port):
# hack to determine if member exists
result = False
@@ -300,7 +314,7 @@ def member_exists(api, pool, address, port):
api.LocalLB.Pool.get_member_object_status(pool_names=[pool],
members=[members])
result = True
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
@@ -308,12 +322,13 @@ def member_exists(api, pool, address, port):
raise
return result
+
def delete_node_address(api, address):
result = False
try:
api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
result = True
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "is referenced by a member of pool" in str(e):
result = False
else:
@@ -321,14 +336,17 @@ def delete_node_address(api, address):
raise
return result
+
def remove_pool_member(api, pool, address, port):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members])
+
def add_pool_member(api, pool, address, port):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members])
+
def main():
lb_method_choices = ['round_robin', 'ratio_member',
'least_connection_member', 'observed_member',
@@ -337,7 +355,7 @@ def main():
'fastest_node_address', 'observed_node_address',
'predictive_node_address', 'dynamic_ratio',
'fastest_app_response', 'least_sessions',
- 'dynamic_ratio_member', 'l3_addr', 'unknown',
+ 'dynamic_ratio_member', 'l3_addr',
'weighted_least_connection_member',
'weighted_least_connection_node_address',
'ratio_session', 'ratio_least_connection_member',
@@ -347,29 +365,45 @@ def main():
service_down_choices = ['none', 'reset', 'drop', 'reselect']
- argument_spec=f5_argument_spec();
- argument_spec.update(dict(
- name = dict(type='str', required=True, aliases=['pool']),
- lb_method = dict(type='str', choices=lb_method_choices),
- monitor_type = dict(type='str', choices=monitor_type_choices),
- quorum = dict(type='int'),
- monitors = dict(type='list'),
- slow_ramp_time = dict(type='int'),
- service_down_action = dict(type='str', choices=service_down_choices),
- host = dict(type='str', aliases=['address']),
- port = dict(type='int')
- )
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ name=dict(type='str', required=True, aliases=['pool']),
+ lb_method=dict(type='str', choices=lb_method_choices),
+ monitor_type=dict(type='str', choices=monitor_type_choices),
+ quorum=dict(type='int'),
+ monitors=dict(type='list'),
+ slow_ramp_time=dict(type='int'),
+ reselect_tries=dict(type='int'),
+ service_down_action=dict(type='str', choices=service_down_choices),
+ host=dict(type='str', aliases=['address']),
+ port=dict(type='int')
)
+ argument_spec.update(meta_args)
module = AnsibleModule(
- argument_spec = argument_spec,
+ argument_spec=argument_spec,
supports_check_mode=True
)
- (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
+ if not bigsuds_found:
+ module.fail_json(msg="the python bigsuds module is required")
+
+ if module.params['validate_certs']:
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ partition = module.params['partition']
+ validate_certs = module.params['validate_certs']
name = module.params['name']
- pool = fq_name(partition,name)
+ pool = fq_name(partition, name)
lb_method = module.params['lb_method']
if lb_method:
lb_method = lb_method.lower()
@@ -383,23 +417,21 @@ def main():
for monitor in module.params['monitors']:
monitors.append(fq_name(partition, monitor))
slow_ramp_time = module.params['slow_ramp_time']
+ reselect_tries = module.params['reselect_tries']
service_down_action = module.params['service_down_action']
if service_down_action:
service_down_action = service_down_action.lower()
host = module.params['host']
- address = fq_name(partition,host)
+ address = fq_name(partition, host)
port = module.params['port']
- if not validate_certs:
- disable_ssl_cert_validation()
-
# sanity check user supplied values
- if (host and not port) or (port and not host):
+ if (host and port is None) or (port is not None and not host):
module.fail_json(msg="both host and port must be supplied")
- if 1 > port > 65535:
- module.fail_json(msg="valid ports must be in range 1 - 65535")
+ if port is not None and (0 > port or port > 65535):
+ module.fail_json(msg="valid ports must be in range 0 - 65535")
if monitors:
if len(monitors) == 1:
@@ -421,7 +453,7 @@ def main():
module.fail_json(msg="quorum requires monitors parameter")
try:
- api = bigip_api(server, user, password)
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
result = {'changed': False} # default
if state == 'absent':
@@ -442,7 +474,7 @@ def main():
try:
remove_pool(api, pool)
result = {'changed': True}
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = {'changed': False}
else:
@@ -465,7 +497,7 @@ def main():
try:
create_pool(api, pool, lb_method)
result = {'changed': True}
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "already exists" in str(e):
update = True
else:
@@ -476,6 +508,8 @@ def main():
set_monitors(api, pool, monitor_type, quorum, monitors)
if slow_ramp_time:
set_slow_ramp_time(api, pool, slow_ramp_time)
+ if reselect_tries:
+ set_reselect_tries(api, pool, reselect_tries)
if service_down_action:
set_action_on_service_down(api, pool, service_down_action)
if host and port:
@@ -502,6 +536,10 @@ def main():
if not module.check_mode:
set_slow_ramp_time(api, pool, slow_ramp_time)
result = {'changed': True}
+ if reselect_tries and reselect_tries != get_reselect_tries(api, pool):
+ if not module.check_mode:
+ set_reselect_tries(api, pool, reselect_tries)
+ result = {'changed': True}
if service_down_action and service_down_action != get_action_on_service_down(api, pool):
if not module.check_mode:
set_action_on_service_down(api, pool, service_down_action)
@@ -510,14 +548,18 @@ def main():
if not module.check_mode:
add_pool_member(api, pool, address, port)
result = {'changed': True}
+ if (host and port == 0) and not member_exists(api, pool, address, port):
+ if not module.check_mode:
+ add_pool_member(api, pool, address, port)
+ result = {'changed': True}
- except Exception, e:
+ except Exception as e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
-# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
-main()
+if __name__ == '__main__':
+ main()
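
# Editor's note: the port guard in bigip_pool was never firing. Python chains
# comparisons, so "1 > port > 65535" means "(1 > port) and (port > 65535)",
# which no integer satisfies. The patch tests each bound separately and widens
# the range to allow port 0. A sketch of the corrected check:
#
#     def port_out_of_range(port):
#         # equivalent to the new guard: 0 > port or port > 65535
#         return port is not None and (port < 0 or port > 65535)
#
#     assert (1 > 80 > 65535) is False        # old chained form: always False
#     assert port_out_of_range(-1)
#     assert port_out_of_range(70000)
#     assert not port_out_of_range(0)         # port 0 now valid (e.g. a wildcard member)
#     assert not port_out_of_range(None)      # unset port handled by a separate check
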
diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py
index ec2b7135372..42d4538f9f6 100644
--- a/network/f5/bigip_pool_member.py
+++ b/network/f5/bigip_pool_member.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-
+#
# (c) 2013, Matt Hite
#
# This file is part of Ansible
@@ -18,191 +18,191 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: bigip_pool_member
-short_description: "Manages F5 BIG-IP LTM pool members"
+short_description: Manages F5 BIG-IP LTM pool members
description:
- - "Manages F5 BIG-IP LTM pool members via iControl SOAP API"
-version_added: "1.4"
-author: "Matt Hite (@mhite)"
+ - Manages F5 BIG-IP LTM pool members via iControl SOAP API
+version_added: 1.4
+author:
+ - Matt Hite (@mhite)
+ - Tim Rupp (@caphrim007)
notes:
- - "Requires BIG-IP software version >= 11"
- - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- - "Best run as a local_action in your playbook"
- - "Supersedes bigip_pool for managing pool members"
-
+ - Requires BIG-IP software version >= 11
+ - F5 developed module 'bigsuds' required (see http://devcentral.f5.com)
+ - Best run as a local_action in your playbook
+ - Supersedes bigip_pool for managing pool members
requirements:
- - bigsuds
+ - bigsuds
options:
- server:
- description:
- - BIG-IP host
- required: true
- user:
- description:
- - BIG-IP username
- required: true
- password:
- description:
- - BIG-IP password
- required: true
- validate_certs:
- description:
- - If C(no), SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
- required: false
- default: 'yes'
- choices: ['yes', 'no']
- version_added: 2.0
- state:
- description:
- - Pool member state
- required: true
- default: present
- choices: ['present', 'absent']
- session_state:
- description:
- - Set new session availability status for pool member
- version_added: "2.0"
- required: false
- default: null
- choices: ['enabled', 'disabled']
- monitor_state:
- description:
- - Set monitor availability status for pool member
- version_added: "2.0"
- required: false
- default: null
- choices: ['enabled', 'disabled']
- pool:
- description:
- - Pool name. This pool must exist.
- required: true
- partition:
- description:
- - Partition
- required: false
- default: 'Common'
- host:
- description:
- - Pool member IP
- required: true
- aliases: ['address', 'name']
- port:
- description:
- - Pool member port
- required: true
- connection_limit:
- description:
- - Pool member connection limit. Setting this to 0 disables the limit.
- required: false
- default: null
+ state:
+ description:
+ - Pool member state
+ required: true
+ default: present
+ choices:
+ - present
+ - absent
+ session_state:
+ description:
+ - Set new session availability status for pool member
+ version_added: 2.0
+ required: false
+ default: null
+ choices:
+ - enabled
+ - disabled
+ monitor_state:
+ description:
+ - Set monitor availability status for pool member
+ version_added: 2.0
+ required: false
+ default: null
+ choices:
+ - enabled
+ - disabled
+ pool:
+ description:
+ - Pool name. This pool must exist.
+ required: true
+ partition:
+ description:
+ - Partition
+ required: false
+ default: 'Common'
+ host:
+ description:
+ - Pool member IP
+ required: true
+ aliases:
+ - address
+ - name
+ port:
+ description:
+ - Pool member port
+ required: true
+ connection_limit:
+ description:
+ - Pool member connection limit. Setting this to 0 disables the limit.
+ required: false
+ default: null
+ description:
+ description:
+ - Pool member description
+ required: false
+ default: null
+ rate_limit:
description:
- description:
- - Pool member description
- required: false
- default: null
- rate_limit:
- description:
- - Pool member rate limit (connections-per-second). Setting this to 0 disables the limit.
- required: false
- default: null
- ratio:
- description:
- - Pool member ratio weight. Valid values range from 1 through 100. New pool members -- unless overriden with this value -- default to 1.
- required: false
- default: null
+ - Pool member rate limit (connections-per-second). Setting this to 0
+ disables the limit.
+ required: false
+ default: null
+ ratio:
+ description:
+ - Pool member ratio weight. Valid values range from 1 through 100.
+ New pool members -- unless overridden with this value -- default
+ to 1.
+ required: false
+ default: null
+ preserve_node:
+ description:
+ - When state is absent and the pool member is no longer referenced
+ in other pools, the default behavior removes the unused node
+ object. Setting this to 'yes' disables this behavior.
+ required: false
+ default: 'no'
+ choices:
+ - yes
+ - no
+ version_added: 2.1
+extends_documentation_fragment: f5
'''
EXAMPLES = '''
-
-## playbook task examples:
-
----
-# file bigip-test.yml
-# ...
-- hosts: bigip-test
- tasks:
- - name: Add pool member
- local_action: >
- bigip_pool_member
- server=lb.mydomain.com
- user=admin
- password=mysecret
- state=present
- pool=matthite-pool
- partition=matthite
- host="{{ ansible_default_ipv4["address"] }}"
- port=80
- description="web server"
- connection_limit=100
- rate_limit=50
- ratio=2
-
- - name: Modify pool member ratio and description
- local_action: >
- bigip_pool_member
- server=lb.mydomain.com
- user=admin
- password=mysecret
- state=present
- pool=matthite-pool
- partition=matthite
- host="{{ ansible_default_ipv4["address"] }}"
- port=80
- ratio=1
- description="nginx server"
-
- - name: Remove pool member from pool
- local_action: >
- bigip_pool_member
- server=lb.mydomain.com
- user=admin
- password=mysecret
- state=absent
- pool=matthite-pool
- partition=matthite
- host="{{ ansible_default_ipv4["address"] }}"
- port=80
-
-
- # The BIG-IP GUI doesn't map directly to the API calls for "Pool ->
- # Members -> State". The following states map to API monitor
- # and session states.
- #
- # Enabled (all traffic allowed):
- # monitor_state=enabled, session_state=enabled
- # Disabled (only persistent or active connections allowed):
- # monitor_state=enabled, session_state=disabled
- # Forced offline (only active connections allowed):
- # monitor_state=disabled, session_state=disabled
- #
- # See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
-
- - name: Force pool member offline
- local_action: >
- bigip_pool_member
- server=lb.mydomain.com
- user=admin
- password=mysecret
- state=present
- session_state=disabled
- monitor_state=disabled
- pool=matthite-pool
- partition=matthite
- host="{{ ansible_default_ipv4["address"] }}"
- port=80
-
+- name: Add pool member
+ bigip_pool_member:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ pool: "my-pool"
+ partition: "Common"
+ host: "{{ ansible_default_ipv4["address"] }}"
+ port: 80
+ description: "web server"
+ connection_limit: 100
+ rate_limit: 50
+ ratio: 2
+ delegate_to: localhost
+
+- name: Modify pool member ratio and description
+ bigip_pool_member:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ pool: "my-pool"
+ partition: "Common"
+ host: "{{ ansible_default_ipv4["address"] }}"
+ port: 80
+ ratio: 1
+ description: "nginx server"
+ delegate_to: localhost
+
+- name: Remove pool member from pool
+ bigip_pool_member:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "absent"
+ pool: "my-pool"
+ partition: "Common"
+ host: "{{ ansible_default_ipv4["address"] }}"
+ port: 80
+ delegate_to: localhost
+
+
+# The BIG-IP GUI doesn't map directly to the API calls for "Pool ->
+# Members -> State". The following states map to API monitor
+# and session states.
+#
+# Enabled (all traffic allowed):
+# monitor_state=enabled, session_state=enabled
+# Disabled (only persistent or active connections allowed):
+# monitor_state=enabled, session_state=disabled
+# Forced offline (only active connections allowed):
+# monitor_state=disabled, session_state=disabled
+#
+# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
+
+- name: Force pool member offline
+ bigip_pool_member:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ session_state: "disabled"
+ monitor_state: "disabled"
+ pool: "my-pool"
+ partition: "Common"
+ host: "{{ ansible_default_ipv4["address"] }}"
+ port: 80
+ delegate_to: localhost
'''
+
def pool_exists(api, pool):
# hack to determine if pool exists
result = False
try:
api.LocalLB.Pool.get_object_status(pool_names=[pool])
result = True
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
@@ -210,6 +210,7 @@ def pool_exists(api, pool):
raise
return result
+
def member_exists(api, pool, address, port):
# hack to determine if member exists
result = False
@@ -218,7 +219,7 @@ def member_exists(api, pool, address, port):
api.LocalLB.Pool.get_member_object_status(pool_names=[pool],
members=[members])
result = True
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
@@ -226,12 +227,13 @@ def member_exists(api, pool, address, port):
raise
return result
+
def delete_node_address(api, address):
result = False
try:
api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
result = True
- except bigsuds.OperationFailed, e:
+ except bigsuds.OperationFailed as e:
if "is referenced by a member of pool" in str(e):
result = False
else:
@@ -239,93 +241,170 @@ def delete_node_address(api, address):
raise
return result
+
def remove_pool_member(api, pool, address, port):
members = [{'address': address, 'port': port}]
- api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members])
+ api.LocalLB.Pool.remove_member_v2(
+ pool_names=[pool],
+ members=[members]
+ )
+
def add_pool_member(api, pool, address, port):
members = [{'address': address, 'port': port}]
- api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members])
+ api.LocalLB.Pool.add_member_v2(
+ pool_names=[pool],
+ members=[members]
+ )
+
def get_connection_limit(api, pool, address, port):
members = [{'address': address, 'port': port}]
- result = api.LocalLB.Pool.get_member_connection_limit(pool_names=[pool], members=[members])[0][0]
+ result = api.LocalLB.Pool.get_member_connection_limit(
+ pool_names=[pool],
+ members=[members]
+ )[0][0]
return result
+
def set_connection_limit(api, pool, address, port, limit):
members = [{'address': address, 'port': port}]
- api.LocalLB.Pool.set_member_connection_limit(pool_names=[pool], members=[members], limits=[[limit]])
+ api.LocalLB.Pool.set_member_connection_limit(
+ pool_names=[pool],
+ members=[members],
+ limits=[[limit]]
+ )
+
def get_description(api, pool, address, port):
members = [{'address': address, 'port': port}]
- result = api.LocalLB.Pool.get_member_description(pool_names=[pool], members=[members])[0][0]
+ result = api.LocalLB.Pool.get_member_description(
+ pool_names=[pool],
+ members=[members]
+ )[0][0]
return result
+
def set_description(api, pool, address, port, description):
members = [{'address': address, 'port': port}]
- api.LocalLB.Pool.set_member_description(pool_names=[pool], members=[members], descriptions=[[description]])
+ api.LocalLB.Pool.set_member_description(
+ pool_names=[pool],
+ members=[members],
+ descriptions=[[description]]
+ )
+
def get_rate_limit(api, pool, address, port):
members = [{'address': address, 'port': port}]
- result = api.LocalLB.Pool.get_member_rate_limit(pool_names=[pool], members=[members])[0][0]
+ result = api.LocalLB.Pool.get_member_rate_limit(
+ pool_names=[pool],
+ members=[members]
+ )[0][0]
return result
+
def set_rate_limit(api, pool, address, port, limit):
members = [{'address': address, 'port': port}]
- api.LocalLB.Pool.set_member_rate_limit(pool_names=[pool], members=[members], limits=[[limit]])
+ api.LocalLB.Pool.set_member_rate_limit(
+ pool_names=[pool],
+ members=[members],
+ limits=[[limit]]
+ )
+
def get_ratio(api, pool, address, port):
members = [{'address': address, 'port': port}]
- result = api.LocalLB.Pool.get_member_ratio(pool_names=[pool], members=[members])[0][0]
+ result = api.LocalLB.Pool.get_member_ratio(
+ pool_names=[pool],
+ members=[members]
+ )[0][0]
return result
+
def set_ratio(api, pool, address, port, ratio):
members = [{'address': address, 'port': port}]
- api.LocalLB.Pool.set_member_ratio(pool_names=[pool], members=[members], ratios=[[ratio]])
+ api.LocalLB.Pool.set_member_ratio(
+ pool_names=[pool],
+ members=[members],
+ ratios=[[ratio]]
+ )
+
def set_member_session_enabled_state(api, pool, address, port, session_state):
members = [{'address': address, 'port': port}]
session_state = ["STATE_%s" % session_state.strip().upper()]
- api.LocalLB.Pool.set_member_session_enabled_state(pool_names=[pool], members=[members], session_states=[session_state])
+ api.LocalLB.Pool.set_member_session_enabled_state(
+ pool_names=[pool],
+ members=[members],
+ session_states=[session_state]
+ )
+
def get_member_session_status(api, pool, address, port):
members = [{'address': address, 'port': port}]
- result = api.LocalLB.Pool.get_member_session_status(pool_names=[pool], members=[members])[0][0]
+ result = api.LocalLB.Pool.get_member_session_status(
+ pool_names=[pool],
+ members=[members]
+ )[0][0]
result = result.split("SESSION_STATUS_")[-1].lower()
return result
+
def set_member_monitor_state(api, pool, address, port, monitor_state):
members = [{'address': address, 'port': port}]
monitor_state = ["STATE_%s" % monitor_state.strip().upper()]
- api.LocalLB.Pool.set_member_monitor_state(pool_names=[pool], members=[members], monitor_states=[monitor_state])
+ api.LocalLB.Pool.set_member_monitor_state(
+ pool_names=[pool],
+ members=[members],
+ monitor_states=[monitor_state]
+ )
+
def get_member_monitor_status(api, pool, address, port):
members = [{'address': address, 'port': port}]
- result = api.LocalLB.Pool.get_member_monitor_status(pool_names=[pool], members=[members])[0][0]
+ result = api.LocalLB.Pool.get_member_monitor_status(
+ pool_names=[pool],
+ members=[members]
+ )[0][0]
result = result.split("MONITOR_STATUS_")[-1].lower()
return result
+
def main():
- argument_spec = f5_argument_spec();
- argument_spec.update(dict(
- session_state = dict(type='str', choices=['enabled', 'disabled']),
- monitor_state = dict(type='str', choices=['enabled', 'disabled']),
- pool = dict(type='str', required=True),
- host = dict(type='str', required=True, aliases=['address', 'name']),
- port = dict(type='int', required=True),
- connection_limit = dict(type='int'),
- description = dict(type='str'),
- rate_limit = dict(type='int'),
- ratio = dict(type='int')
- )
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ session_state=dict(type='str', choices=['enabled', 'disabled']),
+ monitor_state=dict(type='str', choices=['enabled', 'disabled']),
+ pool=dict(type='str', required=True),
+ host=dict(type='str', required=True, aliases=['address', 'name']),
+ port=dict(type='int', required=True),
+ connection_limit=dict(type='int'),
+ description=dict(type='str'),
+ rate_limit=dict(type='int'),
+ ratio=dict(type='int'),
+ preserve_node=dict(type='bool', default=False)
)
+ argument_spec.update(meta_args)
module = AnsibleModule(
- argument_spec = argument_spec,
+ argument_spec=argument_spec,
supports_check_mode=True
)
- (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
+ if module.params['validate_certs']:
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ partition = module.params['partition']
+ validate_certs = module.params['validate_certs']
+
session_state = module.params['session_state']
monitor_state = module.params['monitor_state']
pool = fq_name(partition, module.params['pool'])
@@ -336,18 +415,16 @@ def main():
host = module.params['host']
address = fq_name(partition, host)
port = module.params['port']
+ preserve_node = module.params['preserve_node']
-
- # sanity check user supplied values
-
- if (host and not port) or (port and not host):
+ if (host and port is None) or (port is not None and not host):
module.fail_json(msg="both host and port must be supplied")
- if 1 > port > 65535:
- module.fail_json(msg="valid ports must be in range 1 - 65535")
+ if 0 > port or port > 65535:
+ module.fail_json(msg="valid ports must be in range 0 - 65535")
try:
- api = bigip_api(server, user, password)
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
if not pool_exists(api, pool):
module.fail_json(msg="pool %s does not exist" % pool)
result = {'changed': False} # default
@@ -356,8 +433,11 @@ def main():
if member_exists(api, pool, address, port):
if not module.check_mode:
remove_pool_member(api, pool, address, port)
- deleted = delete_node_address(api, address)
- result = {'changed': True, 'deleted': deleted}
+ if preserve_node:
+ result = {'changed': True}
+ else:
+ deleted = delete_node_address(api, address)
+ result = {'changed': True, 'deleted': deleted}
else:
result = {'changed': True}
@@ -402,7 +482,7 @@ def main():
if not module.check_mode:
set_member_session_enabled_state(api, pool, address, port, session_state)
result = {'changed': True}
- elif session_state == 'disabled' and session_status != 'force_disabled':
+ elif session_state == 'disabled' and session_status != 'forced_disabled':
if not module.check_mode:
set_member_session_enabled_state(api, pool, address, port, session_state)
result = {'changed': True}
@@ -417,13 +497,13 @@ def main():
set_member_monitor_state(api, pool, address, port, monitor_state)
result = {'changed': True}
- except Exception, e:
+ except Exception as e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
-# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
-main()
+if __name__ == '__main__':
+ main()
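
# Editor's note: bigip_pool_member's state=absent path previously always
# deleted the node object after removing the member; the new preserve_node
# flag skips that deletion so nodes still referenced by other pools survive.
# A sketch of the flow, with hypothetical stubs in place of the iControl calls:
#
#     def member_exists(api, pool, address, port): return True      # stub
#     def remove_pool_member(api, pool, address, port): pass        # stub
#     def delete_node_address(api, address): return True            # stub
#
#     def absent(api, pool, address, port, preserve_node=False, check_mode=False):
#         if not member_exists(api, pool, address, port):
#             return {'changed': False}
#         if check_mode:
#             return {'changed': True}
#         remove_pool_member(api, pool, address, port)
#         if preserve_node:
#             return {'changed': True}   # leave the node object in place
#         return {'changed': True, 'deleted': delete_node_address(api, address)}
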
diff --git a/network/f5/bigip_routedomain.py b/network/f5/bigip_routedomain.py
new file mode 100644
index 00000000000..7abe77abac2
--- /dev/null
+++ b/network/f5/bigip_routedomain.py
@@ -0,0 +1,530 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_routedomain
+short_description: Manage route domains on a BIG-IP
+description:
+ - Manage route domains on a BIG-IP
+version_added: "2.2"
+options:
+ bwc_policy:
+ description:
+ - The bandwidth controller for the route domain.
+ connection_limit:
+ description:
+ - The maximum number of concurrent connections allowed for the
+ route domain. Setting this to C(0) turns off connection limits.
+ description:
+ description:
+ - Specifies descriptive text that identifies the route domain.
+ flow_eviction_policy:
+ description:
+ - The eviction policy to use with this route domain. Apply an eviction
+ policy to provide customized responses to flow overflows and slow
+ flows on the route domain.
+ id:
+ description:
+ - The unique identifying integer representing the route domain.
+ required: true
+ parent:
+ description:
+ - Specifies the route domain the system searches when it cannot
+ find a route in the configured domain.
+ required: false
+ routing_protocol:
+ description:
+ - Dynamic routing protocols for the system to use in the route domain.
+ choices:
+ - BFD
+ - BGP
+ - IS-IS
+ - OSPFv2
+ - OSPFv3
+ - PIM
+ - RIP
+ - RIPng
+ service_policy:
+ description:
+ - Service policy to associate with the route domain.
+ state:
+ description:
+ - Whether the route domain should exist or not.
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+ strict:
+ description:
+ - Specifies whether the system enforces cross-routing restrictions
+ or not.
+ choices:
+ - enabled
+ - disabled
+ vlans:
+ description:
+ - VLANs for the system to use in the route domain
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as
+ C(pip install f5-sdk).
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Create a route domain
+ bigip_routedomain:
+ id: "1234"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "present"
+ user: "admin"
+ delegate_to: localhost
+
+- name: Set VLANs on the route domain
+ bigip_routedomain:
+ id: "1234"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "present"
+ user: "admin"
+ vlans:
+ - net1
+ - foo
+ delegate_to: localhost
+'''
+
+RETURN = '''
+id:
+ description: The ID of the route domain that was changed
+ returned: changed
+ type: int
+ sample: 2
+description:
+ description: The description of the route domain
+ returned: changed
+ type: string
+ sample: "route domain foo"
+strict:
+ description: The new strict isolation setting
+ returned: changed
+ type: string
+ sample: "enabled"
+parent:
+ description: The new parent route domain
+ returned: changed
+ type: int
+ sample: 0
+vlans:
+ description: List of new VLANs the route domain is applied to
+ returned: changed
+ type: list
+ sample: ['/Common/http-tunnel', '/Common/socks-tunnel']
+routing_protocol:
+ description: List of routing protocols applied to the route domain
+ returned: changed
+ type: list
+ sample: ['bfd', 'bgp']
+bwc_policy:
+ description: The new bandwidth controller
+ returned: changed
+ type: string
+ sample: /Common/foo
+connection_limit:
+ description: The new connection limit for the route domain
+ returned: changed
+ type: int
+ sample: 100
+flow_eviction_policy:
+ description: The new eviction policy to use with this route domain
+ returned: changed
+ type: string
+ sample: /Common/default-eviction-policy
+service_policy:
+ description: The new service policy to use with this route domain
+ returned: changed
+ type: string
+ sample: /Common/my-service-policy
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+PROTOCOLS = [
+ 'BFD', 'BGP', 'IS-IS', 'OSPFv2', 'OSPFv3', 'PIM', 'RIP', 'RIPng'
+]
+
+STRICTS = ['enabled', 'disabled']
+
+
+class BigIpRouteDomain(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ kwargs['name'] = str(kwargs['id'])
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def absent(self):
+ if not self.exists():
+ return False
+
+ if self.params['check_mode']:
+ return True
+
+ rd = self.api.tm.net.route_domains.route_domain.load(
+ name=self.params['name']
+ )
+ rd.delete()
+
+ if self.exists():
+ raise F5ModuleError("Failed to delete the route domain")
+ else:
+ return True
+
+ def present(self):
+ if self.exists():
+ return self.update()
+ else:
+ if self.params['check_mode']:
+ return True
+ return self.create()
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+ """
+ p = dict()
+ r = self.api.tm.net.route_domains.route_domain.load(
+ name=self.params['name']
+ )
+
+ p['id'] = int(r.id)
+ p['name'] = str(r.name)
+
+ if hasattr(r, 'connectionLimit'):
+ p['connection_limit'] = int(r.connectionLimit)
+ if hasattr(r, 'description'):
+ p['description'] = str(r.description)
+ if hasattr(r, 'strict'):
+ p['strict'] = str(r.strict)
+ if hasattr(r, 'parent'):
+ p['parent'] = r.parent
+ if hasattr(r, 'vlans'):
+ p['vlans'] = list(set([str(x) for x in r.vlans]))
+ if hasattr(r, 'routingProtocol'):
+ p['routing_protocol'] = list(set([str(x) for x in r.routingProtocol]))
+ if hasattr(r, 'flowEvictionPolicy'):
+ p['flow_eviction_policy'] = str(r.flowEvictionPolicy)
+ if hasattr(r, 'bwcPolicy'):
+ p['bwc_policy'] = str(r.bwcPolicy)
+ if hasattr(r, 'servicePolicy'):
+ p['service_policy'] = str(r.servicePolicy)
+ return p
+
+ def domains(self):
+ result = []
+
+ domains = self.api.tm.net.route_domains.get_collection()
+ for domain in domains:
+ # Just checking for the addition of the partition here for
+ # different versions of BIG-IP
+ if '/' + self.params['partition'] + '/' in domain.name:
+ result.append(domain.name)
+ else:
+ full_name = '/%s/%s' % (self.params['partition'], domain.name)
+ result.append(full_name)
+ return result
+
+ def create(self):
+ params = dict()
+ params['id'] = self.params['id']
+ params['name'] = self.params['name']
+
+ partition = self.params['partition']
+ description = self.params['description']
+ strict = self.params['strict']
+ parent = self.params['parent']
+ bwc_policy = self.params['bwc_policy']
+ vlans = self.params['vlans']
+ routing_protocol = self.params['routing_protocol']
+ connection_limit = self.params['connection_limit']
+ flow_eviction_policy = self.params['flow_eviction_policy']
+ service_policy = self.params['service_policy']
+
+ if description is not None:
+ params['description'] = description
+
+ if strict is not None:
+ params['strict'] = strict
+
+ if parent is not None:
+ parent = '/%s/%s' % (partition, parent)
+ if parent in self.domains():
+ params['parent'] = parent
+ else:
+ raise F5ModuleError(
+ "The parent route domain was not found"
+ )
+
+ if bwc_policy is not None:
+ policy = '/%s/%s' % (partition, bwc_policy)
+ params['bwcPolicy'] = policy
+
+ if vlans is not None:
+ params['vlans'] = []
+ for vlan in vlans:
+ vname = '/%s/%s' % (partition, vlan)
+ params['vlans'].append(vname)
+
+ if routing_protocol is not None:
+ params['routingProtocol'] = []
+ for protocol in routing_protocol:
+ if protocol in PROTOCOLS:
+ params['routingProtocol'].append(protocol)
+ else:
+ raise F5ModuleError(
+ "routing_protocol must be one of: %s" % (PROTOCOLS)
+ )
+
+ if connection_limit is not None:
+ params['connectionLimit'] = connection_limit
+
+ if flow_eviction_policy is not None:
+ policy = '/%s/%s' % (partition, flow_eviction_policy)
+ params['flowEvictionPolicy'] = policy
+
+ if service_policy is not None:
+ policy = '/%s/%s' % (partition, service_policy)
+ params['servicePolicy'] = policy
+
+ self.api.tm.net.route_domains.route_domain.create(**params)
+ exists = self.api.tm.net.route_domains.route_domain.exists(
+ name=self.params['name']
+ )
+
+ if exists:
+ return True
+ else:
+ raise F5ModuleError(
+ "An error occurred while creating the route domain"
+ )
+
+ def update(self):
+ changed = False
+ params = dict()
+ current = self.read()
+
+ check_mode = self.params['check_mode']
+ partition = self.params['partition']
+ description = self.params['description']
+ strict = self.params['strict']
+ parent = self.params['parent']
+ bwc_policy = self.params['bwc_policy']
+ vlans = self.params['vlans']
+ routing_protocol = self.params['routing_protocol']
+ connection_limit = self.params['connection_limit']
+ flow_eviction_policy = self.params['flow_eviction_policy']
+ service_policy = self.params['service_policy']
+
+ if description is not None:
+ if 'description' in current:
+ if description != current['description']:
+ params['description'] = description
+ else:
+ params['description'] = description
+
+ if strict is not None:
+ if 'strict' in current:
+ if strict != current['strict']:
+ params['strict'] = strict
+ else:
+ params['strict'] = strict
+
+ if parent is not None:
+ parent = '/%s/%s' % (partition, parent)
+ if 'parent' in current:
+ if parent != current['parent']:
+ params['parent'] = parent
+ else:
+ params['parent'] = parent
+
+ if bwc_policy is not None:
+ policy = '/%s/%s' % (partition, bwc_policy)
+ if 'bwc_policy' in current:
+ if policy != current['bwc_policy']:
+ params['bwcPolicy'] = policy
+ else:
+ params['bwcPolicy'] = policy
+
+ if vlans is not None:
+ tmp = set()
+ for vlan in vlans:
+ vname = '/%s/%s' % (partition, vlan)
+ tmp.add(vname)
+ tmp = list(tmp)
+ if 'vlans' in current:
+ if tmp != current['vlans']:
+ params['vlans'] = tmp
+ else:
+ params['vlans'] = tmp
+
+ if routing_protocol is not None:
+ tmp = set()
+ for protocol in routing_protocol:
+ if protocol in PROTOCOLS:
+ tmp.add(protocol)
+ else:
+ raise F5ModuleError(
+ "routing_protocol must be one of: %s" % (PROTOCOLS)
+ )
+ tmp = list(tmp)
+ if 'routing_protocol' in current:
+ if tmp != current['routing_protocol']:
+ params['routingProtocol'] = tmp
+ else:
+ params['routingProtocol'] = tmp
+
+ if connection_limit is not None:
+ if 'connection_limit' in current:
+ if connection_limit != current['connection_limit']:
+ params['connectionLimit'] = connection_limit
+ else:
+ params['connectionLimit'] = connection_limit
+
+ if flow_eviction_policy is not None:
+ policy = '/%s/%s' % (partition, flow_eviction_policy)
+ if 'flow_eviction_policy' in current:
+ if policy != current['flow_eviction_policy']:
+ params['flowEvictionPolicy'] = policy
+ else:
+ params['flowEvictionPolicy'] = policy
+
+ if service_policy is not None:
+ policy = '/%s/%s' % (partition, service_policy)
+ if 'service_policy' in current:
+ if policy != current['service_policy']:
+ params['servicePolicy'] = policy
+ else:
+ params['servicePolicy'] = policy
+
+ if params:
+ changed = True
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return changed
+ else:
+ return changed
+
+ try:
+ rd = self.api.tm.net.route_domains.route_domain.load(
+ name=self.params['name']
+ )
+ rd.update(**params)
+ rd.refresh()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(e)
+
+ return True
+
+ def exists(self):
+ return self.api.tm.net.route_domains.route_domain.exists(
+ name=self.params['name']
+ )
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+
+ # present() and absent() both honor check mode internally, so no
+ # separate check mode branch is needed here
+ if state == "present":
+ changed = self.present()
+ if not self.params['check_mode']:
+ current = self.read()
+ result.update(current)
+ elif state == "absent":
+ changed = self.absent()
+
+ result.update(dict(changed=changed))
+ return result
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ id=dict(required=True, type='int'),
+ description=dict(required=False, default=None),
+ strict=dict(required=False, default=None, choices=STRICTS),
+ parent=dict(required=False, type='int', default=None),
+ vlans=dict(required=False, default=None, type='list'),
+ routing_protocol=dict(required=False, default=None, type='list'),
+ bwc_policy=dict(required=False, type='str', default=None),
+ connection_limit=dict(required=False, type='int', default=None),
+ flow_eviction_policy=dict(required=False, type='str', default=None),
+ service_policy=dict(required=False, type='str', default=None)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ try:
+ obj = BigIpRouteDomain(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_selfip.py b/network/f5/bigip_selfip.py
new file mode 100644
index 00000000000..d60dafbf7ce
--- /dev/null
+++ b/network/f5/bigip_selfip.py
@@ -0,0 +1,704 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_selfip
+short_description: Manage Self-IPs on a BIG-IP system
+description:
+ - Manage Self-IPs on a BIG-IP system
+version_added: "2.2"
+options:
+ address:
+ description:
+ - The IP address for the new self IP. This value is ignored upon update,
+ as the address itself cannot be changed after creation.
+ allow_service:
+ description:
+ - Configure port lockdown for the Self IP. By default, the Self IP has a
+ "default deny" policy. This can be changed to allow TCP and UDP ports
+ as well as specific protocols. This list should contain C(protocol):C(port)
+ values.
+ name:
+ description:
+ - The self IP to create.
+ required: true
+ default: Value of C(address)
+ netmask:
+ description:
+ - The netmask for the self IP.
+ required: true
+ state:
+ description:
+ - The state of the variable on the system. When C(present), guarantees
+ that the Self-IP exists with the provided attributes. When C(absent),
+ removes the Self-IP from the system.
+ required: false
+ default: present
+ choices:
+ - absent
+ - present
+ traffic_group:
+ description:
+ - The traffic group for the self IP addresses in an active-active,
+ redundant load balancer configuration.
+ required: false
+ vlan:
+ description:
+ - The VLAN that the new self IPs will be on.
+ required: true
+ route_domain:
+ description:
+ - The route domain id of the system. If not specified, the route
+ domain id will be "0" (the default route domain).
+ required: false
+ default: none
+ version_added: 2.3
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+ - Requires the netaddr Python package on the host.
+extends_documentation_fragment: f5
+requirements:
+ - netaddr
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Create Self IP
+ bigip_selfip:
+ address: "10.10.10.10"
+ name: "self1"
+ netmask: "255.255.255.0"
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ validate_certs: "no"
+ vlan: "vlan1"
+ delegate_to: localhost
+
+- name: Create Self IP with a Route Domain
+ bigip_selfip:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ validate_certs: "no"
+ name: "self1"
+ address: "10.10.10.10"
+ netmask: "255.255.255.0"
+ vlan: "vlan1"
+ route_domain: "10"
+ allow_service: "default"
+ delegate_to: localhost
+
+- name: Delete Self IP
+ bigip_selfip:
+ name: "self1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "absent"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
+
+- name: Allow management web UI to be accessed on this Self IP
+ bigip_selfip:
+ name: "self1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "absent"
+ user: "admin"
+ validate_certs: "no"
+ allow_service:
+ - "tcp:443"
+ delegate_to: localhost
+
+- name: Allow HTTPS and SSH access to this Self IP
+ bigip_selfip:
+ name: "self1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "absent"
+ user: "admin"
+ validate_certs: "no"
+ allow_service:
+ - "tcp:443"
+ - "tpc:22"
+ delegate_to: localhost
+
+- name: Allow all services access to this Self IP
+ bigip_selfip:
+ name: "self1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "absent"
+ user: "admin"
+ validate_certs: "no"
+ allow_service:
+ - all
+ delegate_to: localhost
+
+- name: Allow only GRE and IGMP protocols access to this Self IP
+ bigip_selfip:
+ name: "self1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "absent"
+ user: "admin"
+ validate_certs: "no"
+ allow_service:
+ - gre:0
+ - igmp:0
+ delegate_to: localhost
+
+- name: Allow all TCP, but no other protocols access to this Self IP
+ bigip_selfip:
+ name: "self1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ state: "absent"
+ user: "admin"
+ validate_certs: "no"
+ allow_service:
+ - tcp:0
+ delegate_to: localhost
+'''
+
+RETURN = '''
+allow_service:
+ description: Services that are allowed via this Self IP
+ returned: changed
+ type: list
+ sample: ['igmp:0','tcp:22','udp:53']
+address:
+ description: The address for the Self IP
+ returned: created
+ type: string
+ sample: "192.0.2.10"
+name:
+ description: The name of the Self IP
+ returned:
+ - created
+ - changed
+ - deleted
+ type: string
+ sample: "self1"
+netmask:
+ description: The netmask of the Self IP
+ returned:
+ - changed
+ - created
+ type: string
+ sample: "255.255.255.0"
+traffic_group:
+ description: The traffic group that the Self IP is a member of
+ returned:
+ - changed
+ - created
+ type: string
+ sample: "traffic-group-local-only"
+vlan:
+ description: The VLAN set on the Self IP
+ returned:
+ - changed
+ - created
+ type: string
+ sample: "vlan1"
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+try:
+ from netaddr import IPNetwork, AddrFormatError
+ HAS_NETADDR = True
+except ImportError:
+ HAS_NETADDR = False
+
+FLOAT = ['enabled', 'disabled']
+DEFAULT_TG = 'traffic-group-local-only'
+ALLOWED_PROTOCOLS = ['eigrp', 'egp', 'gre', 'icmp', 'igmp', 'igp', 'ipip',
+ 'l2tp', 'ospf', 'pim', 'tcp', 'udp']
+
+
+class BigIpSelfIp(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def present(self):
+ changed = False
+
+ if self.exists():
+ changed = self.update()
+ else:
+ changed = self.create()
+
+ return changed
+
+ def absent(self):
+ changed = False
+
+ if self.exists():
+ changed = self.delete()
+
+ return changed
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+
+ :return: Dictionary of values currently stored in BIG-IP, formatted
+ for use by this class.
+ """
+ p = dict()
+ name = self.params['name']
+ partition = self.params['partition']
+ r = self.api.tm.net.selfips.selfip.load(
+ name=name,
+ partition=partition
+ )
+
+ if hasattr(r, 'address'):
+ p['route_domain'] = str(None)
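+ # Addresses in a non-default route domain come back as, for example,
+ # '10.10.10.10%2/24' (illustrative); split out the '%<rd>' portion
+ # before handing the address to IPNetwork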
+ if '%' in r.address:
+ ipaddr = []
+ ipaddr = r.address.split('%', 1)
+ rdmask = ipaddr[1].split('/', 1)
+ r.address = "%s/%s" % (ipaddr[0], rdmask[1])
+ p['route_domain'] = str(rdmask[0])
+ ipnet = IPNetwork(r.address)
+ p['address'] = str(ipnet.ip)
+ p['netmask'] = str(ipnet.netmask)
+ if hasattr(r, 'trafficGroup'):
+ p['traffic_group'] = str(r.trafficGroup)
+ if hasattr(r, 'vlan'):
+ p['vlan'] = str(r.vlan)
+ if hasattr(r, 'allowService'):
+ if r.allowService == 'all':
+ p['allow_service'] = set(['all'])
+ else:
+ p['allow_service'] = set([str(x) for x in r.allowService])
+ else:
+ p['allow_service'] = set(['none'])
+ p['name'] = name
+ return p
+
+ def verify_services(self):
+ """Verifies that a supplied service string has correct format
+
+ The string format for port lockdown is PROTOCOL:PORT. This method
+ will verify that the provided input matches the allowed protocols
+ and the port ranges before submitting to BIG-IP.
+
+ The only allowed exceptions to this rule are the following values
+
+ * all
+ * default
+ * none
+
+ These are special cases that are handled differently in the API.
+ "all" is set as a string, "default" is set as a one item list, and
+ "none" removes the key entirely from the REST API.
+
+ :raises F5ModuleError:
+ """
+ result = []
+ for svc in self.params['allow_service']:
+ if svc in ['all', 'none', 'default']:
+ result = [svc]
+ break
+
+ tmp = svc.split(':')
+ if tmp[0] not in ALLOWED_PROTOCOLS:
+ raise F5ModuleError(
+ "The provided protocol '%s' is invalid" % (tmp[0])
+ )
+ try:
+ port = int(tmp[1])
+ except Exception:
+ raise F5ModuleError(
+ "The provided port '%s' is not a number" % (tmp[1])
+ )
+
+ if port < 0 or port > 65535:
+ raise F5ModuleError(
+ "The provided port '%s' must be between 0 and 65535"
+ % (port)
+ )
+ else:
+ result.append(svc)
+ return set(result)
+
+ def fmt_services(self, services):
+ """Returns services formatted for consumption by f5-sdk update
+
+ The BIG-IP endpoint for services takes different values depending on
+ what you want the "allowed services" to be. It can be any of the
+ following
+
+ - a list containing "protocol:port" values
+ - the string "all"
+ - a null value, or None
+
+ This is a convenience function to massage the values the user has
+ supplied so that they are formatted in such a way that BIG-IP will
+ accept them and apply the specified policy.
+
+ :param services: The services to format. This is always a Python set
+ :return:
+ """
+ result = list(services)
+ if result[0] == 'all':
+ return 'all'
+ elif result[0] == 'none':
+ return None
+ else:
+ return list(services)
+
+ def traffic_groups(self):
+ result = []
+
+ groups = self.api.tm.cm.traffic_groups.get_collection()
+ for group in groups:
+ # Check whether the partition is already part of the name; this
+ # differs between versions of BIG-IP
+ if '/' + self.params['partition'] + '/' in group.name:
+ result.append(group.name)
+ else:
+ full_name = '/%s/%s' % (self.params['partition'], group.name)
+ result.append(str(full_name))
+ return result
+
+ def update(self):
+ changed = False
+ svcs = []
+ params = dict()
+ current = self.read()
+
+ check_mode = self.params['check_mode']
+ address = self.params['address']
+ allow_service = self.params['allow_service']
+ name = self.params['name']
+ netmask = self.params['netmask']
+ partition = self.params['partition']
+ traffic_group = self.params['traffic_group']
+ vlan = self.params['vlan']
+ route_domain = self.params['route_domain']
+
+ if address is not None and address != current['address']:
+ raise F5ModuleError(
+ 'Self IP addresses cannot be updated'
+ )
+
+ if netmask is not None:
+ # I ignore the address value here even if they provide it because
+ # you are not allowed to change it.
+ try:
+ address = IPNetwork(current['address'])
+
+ new_addr = "%s/%s" % (address.ip, netmask)
+ nipnet = IPNetwork(new_addr)
+ if route_domain is not None:
+ nipnet = "%s%s%s" % (address.ip, route_domain, netmask)
+
+ cur_addr = "%s/%s" % (current['address'], current['netmask'])
+ cipnet = IPNetwork(cur_addr)
+ if route_domain is not None:
+ cipnet = "%s%s%s" % (current['address'], current['route_domain'], current['netmask'])
+
+ if nipnet != cipnet:
+ if route_domain is not None:
+ address = "%s%s%s/%s" % (address.ip, '%', route_domain, netmask)
+ else:
+ address = "%s/%s" % (nipnet.ip, nipnet.prefixlen)
+ params['address'] = address
+ except AddrFormatError:
+ raise F5ModuleError(
+ 'The provided address/netmask value was invalid'
+ )
+
+ if traffic_group is not None:
+ traffic_group = "/%s/%s" % (partition, traffic_group)
+ if traffic_group not in self.traffic_groups():
+ raise F5ModuleError(
+ 'The specified traffic group was not found'
+ )
+
+ if 'traffic_group' in current:
+ if traffic_group != current['traffic_group']:
+ params['trafficGroup'] = traffic_group
+ else:
+ params['trafficGroup'] = traffic_group
+
+ if vlan is not None:
+ vlans = self.get_vlans()
+ vlan = "/%s/%s" % (partition, vlan)
+
+ if vlan not in vlans:
+ raise F5ModuleError(
+ 'The specified VLAN was not found'
+ )
+
+ if 'vlan' in current:
+ if vlan != current['vlan']:
+ params['vlan'] = vlan
+ else:
+ params['vlan'] = vlan
+
+ if allow_service is not None:
+ svcs = self.verify_services()
+ if 'allow_service' in current:
+ if svcs != current['allow_service']:
+ params['allowService'] = self.fmt_services(svcs)
+ else:
+ params['allowService'] = self.fmt_services(svcs)
+
+ if params:
+ changed = True
+ params['name'] = name
+ params['partition'] = partition
+ if check_mode:
+ return changed
+ self.cparams = camel_dict_to_snake_dict(params)
+ if svcs:
+ self.cparams['allow_service'] = list(svcs)
+ else:
+ return changed
+
+ r = self.api.tm.net.selfips.selfip.load(
+ name=name,
+ partition=partition
+ )
+ r.update(**params)
+ r.refresh()
+
+ return True
+
+ def get_vlans(self):
+ """Returns formatted list of VLANs
+
+ The VLAN values stored in BIG-IP are done so using their fully
+ qualified name which includes the partition. Therefore, "correct"
+ values according to BIG-IP look like this
+
+ /Common/vlan1
+
+ This is in contrast to the short form that most users associate
+ with VLANs
+
+ vlan1
+
+ To provide a consistent user experience while still giving BIG-IP
+ what it expects, we massage the values provided by the user so
+ that they include the partition.
+
+ :return: List of VLANs formatted with the preceding partition
+ """
+ partition = self.params['partition']
+ vlans = self.api.tm.net.vlans.get_collection()
+ return [str("/" + partition + "/" + x.name) for x in vlans]
+
+ def create(self):
+ params = dict()
+
+ svcs = []
+ check_mode = self.params['check_mode']
+ address = self.params['address']
+ allow_service = self.params['allow_service']
+ name = self.params['name']
+ netmask = self.params['netmask']
+ partition = self.params['partition']
+ traffic_group = self.params['traffic_group']
+ vlan = self.params['vlan']
+ route_domain = self.params['route_domain']
+
+ if address is None or netmask is None:
+ raise F5ModuleError(
+ 'An address and a netmask must be specified'
+ )
+
+ if vlan is None:
+ raise F5ModuleError(
+ 'A VLAN name must be specified'
+ )
+ else:
+ vlan = "/%s/%s" % (partition, vlan)
+
+ try:
+ ipin = "%s/%s" % (address, netmask)
+ ipnet = IPNetwork(ipin)
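+ # BIG-IP expects route-domain-qualified addresses in the form
+ # 'address%rd/prefix', e.g. '10.10.10.10%10/24' (illustrative)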
+ if route_domain is not None:
+ params['address'] = "%s%s%s/%s" % (ipnet.ip, '%', route_domain, ipnet.prefixlen)
+ else:
+ params['address'] = "%s/%s" % (ipnet.ip, ipnet.prefixlen)
+ except AddrFormatError:
+ raise F5ModuleError(
+ 'The provided address/netmask value was invalid'
+ )
+
+ if traffic_group is None:
+ params['trafficGroup'] = "/%s/%s" % (partition, DEFAULT_TG)
+ else:
+ traffic_group = "/%s/%s" % (partition, traffic_group)
+ if traffic_group in self.traffic_groups():
+ params['trafficGroup'] = traffic_group
+ else:
+ raise F5ModuleError(
+ 'The specified traffic group was not found'
+ )
+
+ vlans = self.get_vlans()
+ if vlan in vlans:
+ params['vlan'] = vlan
+ else:
+ raise F5ModuleError(
+ 'The specified VLAN was not found'
+ )
+
+ if allow_service is not None:
+ svcs = self.verify_services()
+ params['allowService'] = self.fmt_services(svcs)
+
+ params['name'] = name
+ params['partition'] = partition
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if svcs:
+ self.cparams['allow_service'] = list(svcs)
+
+ if check_mode:
+ return True
+
+ d = self.api.tm.net.selfips.selfip
+ d.create(**params)
+
+ if self.exists():
+ return True
+ else:
+ raise F5ModuleError("Failed to create the self IP")
+
+ def delete(self):
+ params = dict()
+ check_mode = self.params['check_mode']
+
+ params['name'] = self.params['name']
+ params['partition'] = self.params['partition']
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return True
+
+ dc = self.api.tm.net.selfips.selfip.load(**params)
+ dc.delete()
+
+ if self.exists():
+ raise F5ModuleError("Failed to delete the self IP")
+ return True
+
+ def exists(self):
+ name = self.params['name']
+ partition = self.params['partition']
+ return self.api.tm.net.selfips.selfip.exists(
+ name=name,
+ partition=partition
+ )
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+
+ try:
+ if state == "present":
+ changed = self.present()
+ elif state == "absent":
+ changed = self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ address=dict(required=False, default=None),
+ allow_service=dict(type='list', default=None),
+ name=dict(required=True),
+ netmask=dict(required=False, default=None),
+ traffic_group=dict(required=False, default=None),
+ vlan=dict(required=False, default=None),
+ route_domain=dict(required=False, default=None)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ try:
+ if not HAS_NETADDR:
+ raise F5ModuleError(
+ "The netaddr python module is required."
+ )
+
+ obj = BigIpSelfIp(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_snat_pool.py b/network/f5/bigip_snat_pool.py
new file mode 100644
index 00000000000..52341e4dfe8
--- /dev/null
+++ b/network/f5/bigip_snat_pool.py
@@ -0,0 +1,417 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_snat_pool
+short_description: Manage SNAT pools on a BIG-IP.
+description:
+ - Manage SNAT pools on a BIG-IP.
+version_added: "2.3"
+options:
+ append:
+ description:
+ - When C(yes), will only add members to the SNAT pool. When C(no), will
+ replace the existing member list with the provided member list.
+ choices:
+ - yes
+ - no
+ default: no
+ members:
+ description:
+ - List of members to put in the SNAT pool. When a C(state) of present is
+ provided, this parameter is required. Otherwise, it is optional.
+ required: false
+ default: None
+ aliases: ['member']
+ name:
+ description: The name of the SNAT pool.
+ required: True
+ state:
+ description:
+ - Whether the SNAT pool should exist or not.
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as
+ pip install f5-sdk
+ - Requires the netaddr Python package on the host. This is as easy as
+ pip install netaddr
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Add the SNAT pool 'my-snat-pool'
+ bigip_snat_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ name: "my-snat-pool"
+ state: "present"
+ members:
+ - 10.10.10.10
+ - 20.20.20.20
+ delegate_to: localhost
+
+- name: Change the SNAT pool's members to a single member
+ bigip_snat_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ name: "my-snat-pool"
+ state: "present"
+ member: "30.30.30.30"
+ delegate_to: localhost
+
+- name: Append a new list of members to the existing pool
+ bigip_snat_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ name: "my-snat-pool"
+ state: "present"
+ members:
+ - 10.10.10.10
+ - 20.20.20.20
+ delegate_to: localhost
+
+- name: Remove the SNAT pool 'my-snat-pool'
+ bigip_snat_pool:
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ name: "johnd"
+ state: "absent"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+members:
+ description:
+ - List of members that are part of the SNAT pool.
+ returned: changed and success
+ type: list
+ sample: "['10.10.10.10']"
+'''
+
+try:
+ from f5.bigip.contexts import TransactionContextManager
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+try:
+ from netaddr import IPAddress, AddrFormatError
+ HAS_NETADDR = True
+except ImportError:
+ HAS_NETADDR = False
+
+
+class BigIpSnatPoolManager(object):
+ def __init__(self, *args, **kwargs):
+ self.changed_params = dict()
+ self.params = kwargs
+ self.api = None
+
+ def apply_changes(self):
+ result = dict()
+
+ changed = self.apply_to_running_config()
+ if changed:
+ self.save_running_config()
+
+ result.update(**self.changed_params)
+ result.update(dict(changed=changed))
+ return result
+
+ def apply_to_running_config(self):
+ try:
+ self.api = self.connect_to_bigip(**self.params)
+ if self.params['state'] == "present":
+ return self.present()
+ elif self.params['state'] == "absent":
+ return self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ def save_running_config(self):
+ self.api.tm.sys.config.exec_cmd('save')
+
+ def present(self):
+ if self.params['members'] is None:
+ raise F5ModuleError(
+ "The members parameter must be specified"
+ )
+
+ if self.snat_pool_exists():
+ return self.update_snat_pool()
+ else:
+ return self.ensure_snat_pool_is_present()
+
+ def absent(self):
+ changed = False
+ if self.snat_pool_exists():
+ changed = self.ensure_snat_pool_is_absent()
+ return changed
+
+ def connect_to_bigip(self, **kwargs):
+ return ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def read_snat_pool_information(self):
+ pool = self.load_snat_pool()
+ return self.format_snat_pool_information(pool)
+
+ def format_snat_pool_information(self, pool):
+ """Ensure that the pool information is in a standard format
+
+ The SDK provides information back in a format that may change with
+ the version of BIG-IP being worked with. Therefore, we need to make
+ sure that the data is formatted in the way that our module expects.
+
+ Additionally, this takes care of minor variations between Python 2
+ and Python 3.
+
+ :param pool:
+ :return:
+ """
+ result = dict()
+ result['name'] = str(pool.name)
+ if hasattr(pool, 'members'):
+ result['members'] = self.format_current_members(pool)
+ return result
+
+ def format_current_members(self, pool):
+ result = set()
+ partition_prefix = "/{0}/".format(self.params['partition'])
+
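+ # Strip the partition prefix so that '/Common/10.10.10.10'
+ # (illustrative) compares equal to the bare '10.10.10.10' a user
+ # would supply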
+ for member in pool.members:
+ member = str(member.replace(partition_prefix, ''))
+ result.update([member])
+ return list(result)
+
+ def load_snat_pool(self):
+ return self.api.tm.ltm.snatpools.snatpool.load(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+
+ def snat_pool_exists(self):
+ return self.api.tm.ltm.snatpools.snatpool.exists(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+
+ def update_snat_pool(self):
+ params = self.get_changed_parameters()
+ if params:
+ self.changed_params = camel_dict_to_snake_dict(params)
+ if self.params['check_mode']:
+ return True
+ else:
+ return False
+ params['name'] = self.params['name']
+ params['partition'] = self.params['partition']
+ self.update_snat_pool_on_device(params)
+ return True
+
+ def update_snat_pool_on_device(self, params):
+ tx = self.api.tm.transactions.transaction
+ with TransactionContextManager(tx) as api:
+ r = api.tm.ltm.snatpools.snatpool.load(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+ r.modify(**params)
+
+ def get_changed_parameters(self):
+ result = dict()
+ current = self.read_snat_pool_information()
+ if self.are_members_changed(current):
+ result['members'] = self.get_new_member_list(current['members'])
+ return result
+
+ def are_members_changed(self, current):
+ if self.params['members'] is None:
+ return False
+ if 'members' not in current:
+ return True
+ if set(self.params['members']) == set(current['members']):
+ return False
+ if not self.params['append']:
+ return True
+
+ # Checking to see if the supplied list is a subset of the current
+ # list is only relevant if the `append` parameter is provided.
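+ # For example (illustrative), with append enabled, supplying
+ # ['10.10.10.10'] against current members
+ # ['10.10.10.10', '20.20.20.20'] is a no-op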
+ new_members = set(self.params['members'])
+ current_members = set(current['members'])
+ if new_members.issubset(current_members):
+ return False
+ else:
+ return True
+
+ def get_new_member_list(self, current_members):
+ result = set()
+
+ if self.params['append']:
+ result.update(set(current_members))
+ result.update(set(self.params['members']))
+ else:
+ result.update(set(self.params['members']))
+ return list(result)
+
+ def ensure_snat_pool_is_present(self):
+ params = self.get_snat_pool_creation_parameters()
+ self.changed_params = camel_dict_to_snake_dict(params)
+ if self.params['check_mode']:
+ return True
+ self.create_snat_pool_on_device(params)
+ if self.snat_pool_exists():
+ return True
+ else:
+ raise F5ModuleError("Failed to create the SNAT pool")
+
+ def get_snat_pool_creation_parameters(self):
+ members = self.get_formatted_members_list()
+ return dict(
+ name=self.params['name'],
+ partition=self.params['partition'],
+ members=members
+ )
+
+ def get_formatted_members_list(self):
+ result = set()
+ try:
+ for ip in self.params['members']:
+ address = str(IPAddress(ip))
+ result.update([address])
+ return list(result)
+ except AddrFormatError:
+ raise F5ModuleError(
+ 'The provided member address is not a valid IP address'
+ )
+
+ def create_snat_pool_on_device(self, params):
+ tx = self.api.tm.transactions.transaction
+ with TransactionContextManager(tx) as api:
+ api.tm.ltm.snatpools.snatpool.create(**params)
+
+ def ensure_snat_pool_is_absent(self):
+ if self.params['check_mode']:
+ return True
+ self.delete_snat_pool_from_device()
+ if self.snat_pool_exists():
+ raise F5ModuleError("Failed to delete the SNAT pool")
+ return True
+
+ def delete_snat_pool_from_device(self):
+ tx = self.api.tm.transactions.transaction
+ with TransactionContextManager(tx) as api:
+ pool = api.tm.ltm.snatpools.snatpool.load(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+ pool.delete()
+
+
+class BigIpSnatPoolModuleConfig(object):
+ def __init__(self):
+ self.argument_spec = dict()
+ self.meta_args = dict()
+ self.supports_check_mode = True
+ self.states = ['absent', 'present']
+
+ self.initialize_meta_args()
+ self.initialize_argument_spec()
+
+ def initialize_meta_args(self):
+ args = dict(
+ append=dict(
+ default=False,
+ type='bool',
+ choices=BOOLEANS
+ ),
+ name=dict(required=True),
+ members=dict(
+ required=False,
+ default=None,
+ type='list',
+ aliases=['member']
+ ),
+ state=dict(
+ default='present',
+ choices=self.states
+ )
+ )
+ self.meta_args = args
+
+ def initialize_argument_spec(self):
+ self.argument_spec = f5_argument_spec()
+ self.argument_spec.update(self.meta_args)
+
+ def create(self):
+ return AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=self.supports_check_mode
+ )
+
+
+def main():
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ if not HAS_NETADDR:
+ raise F5ModuleError("The python netaddr module is required")
+
+ config = BigIpSnatPoolModuleConfig()
+ module = config.create()
+
+ try:
+ obj = BigIpSnatPoolManager(
+ check_mode=module.check_mode, **module.params
+ )
+ result = obj.apply_changes()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_ssl_certificate.py b/network/f5/bigip_ssl_certificate.py
new file mode 100644
index 00000000000..fe0a753e834
--- /dev/null
+++ b/network/f5/bigip_ssl_certificate.py
@@ -0,0 +1,520 @@
+#!/usr/bin/python
+#
+# (c) 2016, Kevin Coming (@waffie1)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: bigip_ssl_certificate
+short_description: Import/Delete certificates from BIG-IP
+description:
+ - This module will import/delete SSL certificates on BIG-IP LTM.
+ Certificates can be imported from certificate and key files on the local
+ disk, in PEM format.
+version_added: 2.2
+options:
+ cert_content:
+ description:
+ - When used instead of C(cert_src), sets the contents of a certificate directly
+ to the specified value. This is used with lookup plugins or for anything
+ with formatting or templating. Either one of C(key_src),
+ C(key_content), C(cert_src) or C(cert_content) must be provided when
+ C(state) is C(present).
+ required: false
+ key_content:
+ description:
+ - When used instead of C(key_src), sets the contents of a certificate key
+ directly to the specified value. This is used with lookup plugins or for
+ anything with formatting or templating. Either one of C(key_src),
+ C(key_content), C(cert_src) or C(cert_content) must be provided when
+ C(state) is C(present).
+ required: false
+ state:
+ description:
+ - Certificate and key state. This determines if the provided certificate
+ and key is to be made C(present) on the device or C(absent).
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+ partition:
+ description:
+ - BIG-IP partition to use when adding/deleting certificate.
+ required: false
+ default: Common
+ name:
+ description:
+ - SSL Certificate Name. This is the cert/key pair name used
+ when importing a certificate/key into the F5. It also
+ determines the filenames of the objects on the LTM
+ (:Partition:name.cer_11111_1 and :Partition:name.key_11111_1).
+ required: true
+ cert_src:
+ description:
+ - This is the local filename of the certificate. Either one of C(key_src),
+ C(key_content), C(cert_src) or C(cert_content) must be provided when
+ C(state) is C(present).
+ required: false
+ key_src:
+ description:
+ - This is the local filename of the private key. Either one of C(key_src),
+ C(key_content), C(cert_src) or C(cert_content) must be provided when
+ C(state) is C(present).
+ required: false
+ passphrase:
+ description:
+ - Passphrase on certificate private key
+ required: false
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+ - Requires the netaddr Python package on the host.
+ - If you use this module, you will not be able to remove the certificates
+ and keys that it manages via the web UI. You can only remove them via
+ tmsh or this module.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk >= 1.5.0
+ - BigIP >= v12
+author:
+ - Kevin Coming (@waffie1)
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Import PEM Certificate from local disk
+ bigip_ssl_certificate:
+ name: "certificate-name"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ cert_src: "/path/to/cert.crt"
+ key_src: "/path/to/key.key"
+ delegate_to: localhost
+
+- name: Use a file lookup to import PEM Certificate
+ bigip_ssl_certificate:
+ name: "certificate-name"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ cert_content: "{{ lookup('file', '/path/to/cert.crt') }}"
+ key_content: "{{ lookup('file', '/path/to/key.key') }}"
+ delegate_to: localhost
+
+- name: "Delete Certificate"
+ bigip_ssl_certificate:
+ name: "certificate-name"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "absent"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+cert_name:
+ description: >
+ The name of the SSL certificate. The C(cert_name) and
+ C(key_name) will be equal to each other.
+ returned:
+ - created
+ - changed
+ - deleted
+ type: string
+ sample: "cert1"
+key_name:
+ description: >
+ The name of the SSL certificate key. The C(key_name) and
+ C(cert_name) will be equal to each other.
+ returned:
+ - created
+ - changed
+ - deleted
+ type: string
+ sample: "key1"
+partition:
+ description: Partition in which the cert/key was created
+ returned:
+ - changed
+ - created
+ - deleted
+ type: string
+ sample: "Common"
+key_checksum:
+ description: SHA1 checksum of the key that was provided
+ returned:
+ - changed
+ - created
+ type: string
+ sample: "cf23df2207d99a74fbe169e3eba035e633b65d94"
+cert_checksum:
+ description: SHA1 checksum of the cert that was provided
+ returned:
+ - changed
+ - created
+ type: string
+ sample: "f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0"
+'''
+
+
+try:
+ from f5.bigip.contexts import TransactionContextManager
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+import hashlib
+import StringIO
+
+
+class BigIpSslCertificate(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ required_args = ['key_content', 'key_src', 'cert_content', 'cert_src']
+
+ ksource = kwargs['key_src']
+ if ksource:
+ with open(ksource) as f:
+ kwargs['key_content'] = f.read()
+
+ csource = kwargs['cert_src']
+ if csource:
+ with open(csource) as f:
+ kwargs['cert_content'] = f.read()
+
+ if kwargs['state'] == 'present':
+ if not any(kwargs[k] is not None for k in required_args):
+ raise F5ModuleError(
+ "Either 'key_content', 'key_src', 'cert_content' or "
+ "'cert_src' must be provided"
+ )
+
+ # This is the remote BIG-IP path from where it will look for certs
+ # to install.
+ self.dlpath = '/var/config/rest/downloads'
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def exists(self):
+ cert = self.cert_exists()
+ key = self.key_exists()
+
+ if cert and key:
+ return True
+ else:
+ return False
+
+ def get_hash(self, content):
+ k = hashlib.sha1()
+ s = StringIO.StringIO(content)
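+ # Hash in 1 KB chunks; certificates are small, but this keeps the
+ # digest code uniform for arbitrarily large content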
+ while True:
+ data = s.read(1024)
+ if not data:
+ break
+ k.update(data)
+ return k.hexdigest()
+
+ def present(self):
+ current = self.read()
+ changed = False
+ do_key = False
+ do_cert = False
+ chash = None
+ khash = None
+
+ check_mode = self.params['check_mode']
+ name = self.params['name']
+ partition = self.params['partition']
+ cert_content = self.params['cert_content']
+ key_content = self.params['key_content']
+ passphrase = self.params['passphrase']
+
+ # Technically you don't need to provide any content for your cert,
+ # but that's illogical, so we just report that nothing was done if
+ # you left both the cert and key empty.
+ if not cert_content and not key_content:
+ return False
+
+ if key_content is not None:
+ if 'key_checksum' in current:
+ khash = self.get_hash(key_content)
+ if khash not in current['key_checksum']:
+ do_key = "update"
+ else:
+ do_key = "create"
+
+ if cert_content is not None:
+ if 'cert_checksum' in current:
+ chash = self.get_hash(cert_content)
+ if chash not in current['cert_checksum']:
+ do_cert = "update"
+ else:
+ do_cert = "create"
+
+ if do_cert or do_key:
+ changed = True
+ params = dict()
+ params['cert_name'] = name
+ params['key_name'] = name
+ params['partition'] = partition
+ if khash:
+ params['key_checksum'] = khash
+ if chash:
+ params['cert_checksum'] = chash
+ self.cparams = params
+
+ if check_mode:
+ return changed
+
+ if not do_cert and not do_key:
+ return False
+
+ tx = self.api.tm.transactions.transaction
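+ # Run the upload and install steps inside a single iControl REST
+ # transaction so a partial failure does not leave a mismatched
+ # cert/key pair on the device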
+ with TransactionContextManager(tx) as api:
+ if do_cert:
+ # Upload the content of a certificate as a StringIO object
+ cstring = StringIO.StringIO(cert_content)
+ filename = "%s.crt" % (name)
+ filepath = os.path.join(self.dlpath, filename)
+ api.shared.file_transfer.uploads.upload_stringio(
+ cstring,
+ filename
+ )
+
+ if do_cert == "update":
+ # Install the certificate
+ params = {
+ 'name': name,
+ 'partition': partition
+ }
+ cert = api.tm.sys.file.ssl_certs.ssl_cert.load(**params)
+
+ # This works because, while the source path is the same,
+ # calling update causes the file to be re-read
+ cert.update()
+ changed = True
+ elif do_cert == "create":
+ # Install the certificate
+ params = {
+ 'sourcePath': "file://" + filepath,
+ 'name': name,
+ 'partition': partition
+ }
+ api.tm.sys.file.ssl_certs.ssl_cert.create(**params)
+ changed = True
+
+ if do_key:
+ # Upload the content of a certificate key as a StringIO object
+ kstring = StringIO.StringIO(key_content)
+ filename = "%s.key" % (name)
+ filepath = os.path.join(self.dlpath, filename)
+ api.shared.file_transfer.uploads.upload_stringio(
+ kstring,
+ filename
+ )
+
+ if do_key == "update":
+ # Install the key
+ params = {
+ 'name': name,
+ 'partition': partition
+ }
+ key = api.tm.sys.file.ssl_keys.ssl_key.load(**params)
+
+ params = dict()
+
+ if passphrase:
+ params['passphrase'] = passphrase
+ else:
+ params['passphrase'] = None
+
+ key.update(**params)
+ changed = True
+ elif do_key == "create":
+ # Install the key
+ params = {
+ 'sourcePath': "file://" + filepath,
+ 'name': name,
+ 'partition': partition
+ }
+ if passphrase:
+ params['passphrase'] = self.params['passphrase']
+ else:
+ params['passphrase'] = None
+
+ api.tm.sys.file.ssl_keys.ssl_key.create(**params)
+ changed = True
+ return changed
+
+ def key_exists(self):
+ return self.api.tm.sys.file.ssl_keys.ssl_key.exists(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+
+ def cert_exists(self):
+ return self.api.tm.sys.file.ssl_certs.ssl_cert.exists(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+
+ def read(self):
+ p = dict()
+ name = self.params['name']
+ partition = self.params['partition']
+
+ if self.key_exists():
+ key = self.api.tm.sys.file.ssl_keys.ssl_key.load(
+ name=name,
+ partition=partition
+ )
+ if hasattr(key, 'checksum'):
+ p['key_checksum'] = str(key.checksum)
+
+ if self.cert_exists():
+ cert = self.api.tm.sys.file.ssl_certs.ssl_cert.load(
+ name=name,
+ partition=partition
+ )
+ if hasattr(cert, 'checksum'):
+ p['cert_checksum'] = str(cert.checksum)
+
+ p['name'] = name
+ return p
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+
+ try:
+ if state == "present":
+ changed = self.present()
+ elif state == "absent":
+ changed = self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+ def absent(self):
+ changed = False
+
+ if self.exists():
+ changed = self.delete()
+
+ return changed
+
+ def delete(self):
+ changed = False
+
+ check_mode = self.params['check_mode']
+
+ delete_cert = self.cert_exists()
+ delete_key = self.key_exists()
+
+ if not delete_cert and not delete_key:
+ return changed
+
+ if check_mode:
+ params = dict()
+ params['cert_name'] = self.params['name']
+ params['key_name'] = self.params['name']
+ params['partition'] = self.params['partition']
+ self.cparams = params
+ return True
+
+ tx = self.api.tm.transactions.transaction
+ with TransactionContextManager(tx) as api:
+ if delete_cert:
+ # Delete the certificate
+ c = api.tm.sys.file.ssl_certs.ssl_cert.load(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+ c.delete()
+ changed = True
+
+ if delete_key:
+ # Delete the certificate key
+ k = self.api.tm.sys.file.ssl_keys.ssl_key.load(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+ k.delete()
+ changed = True
+ return changed
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ name=dict(type='str', required=True),
+ cert_content=dict(type='str', default=None),
+ cert_src=dict(type='path', default=None),
+ key_content=dict(type='str', default=None),
+ key_src=dict(type='path', default=None),
+ passphrase=dict(type='str', default=None, no_log=True)
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['key_content', 'key_src'],
+ ['cert_content', 'cert_src']
+ ]
+ )
+
+ try:
+ obj = BigIpSslCertificate(check_mode=module.check_mode,
+ **module.params)
+ result = obj.flush()
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_sys_db.py b/network/f5/bigip_sys_db.py
new file mode 100644
index 00000000000..b451461b9c2
--- /dev/null
+++ b/network/f5/bigip_sys_db.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_sys_db
+short_description: Manage BIG-IP system database variables
+description:
+ - Manage BIG-IP system database variables
+version_added: "2.2"
+options:
+ key:
+ description:
+ - The database variable to manipulate.
+ required: true
+ state:
+ description:
+ - The state of the variable on the system. When C(present), guarantees
+ that an existing variable is set to C(value). When C(reset) sets the
+ variable back to the default value. At least one of value and state
+ C(reset) are required.
+ required: false
+ default: present
+ choices:
+ - present
+ - reset
+ value:
+ description:
+ - The value to set the key to. At least one of value and state C(reset)
+ are required.
+ required: false
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+ - Requires BIG-IP version 12.0.0 or greater
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Set the boot.quiet DB variable on the BIG-IP
+ bigip_sys_db:
+ user: "admin"
+ password: "secret"
+ server: "lb.mydomain.com"
+ key: "boot.quiet"
+ value: "disable"
+ delegate_to: localhost
+
+- name: Disable the initial setup screen
+ bigip_sys_db:
+ user: "admin"
+ password: "secret"
+ server: "lb.mydomain.com"
+ key: "setup.run"
+ value: "false"
+ delegate_to: localhost
+
+- name: Reset the initial setup screen
+ bigip_sys_db:
+ user: "admin"
+ password: "secret"
+ server: "lb.mydomain.com"
+ key: "setup.run"
+ state: "reset"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+name:
+ description: The key in the system database that was specified
+ returned: changed and success
+ type: string
+ sample: "setup.run"
+default_value:
+ description: The default value of the key
+ returned: changed and success
+ type: string
+ sample: "true"
+value:
+ description: The value that you set the key to
+ returned: changed and success
+ type: string
+ sample: "false"
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+class BigIpSysDb(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+ value = self.params['value']
+
+ if state != 'reset' and not value:
+ raise F5ModuleError(
+ "When setting a key, a value must be supplied"
+ )
+
+ current = self.read()
+
+ if self.params['check_mode']:
+ if value == current.value:
+ changed = False
+ else:
+ changed = True
+ else:
+ if state == "present":
+ changed = self.present()
+ elif state == "reset":
+ changed = self.reset()
+ current = self.read()
+ result.update(
+ name=current.name,
+ default_value=current.defaultValue,
+ value=current.value
+ )
+
+ result.update(dict(changed=changed))
+ return result
+
+ def read(self):
+ dbs = self.api.tm.sys.dbs.db.load(
+ name=self.params['key']
+ )
+ return dbs
+
+ def present(self):
+ current = self.read()
+
+ if current.value == self.params['value']:
+ return False
+
+ current.update(value=self.params['value'])
+ current.refresh()
+
+ if current.value != self.params['value']:
+ raise F5ModuleError(
+ "Failed to set the DB variable"
+ )
+ return True
+
+ def reset(self):
+ current = self.read()
+
+ default = current.defaultValue
+ if current.value == default:
+ return False
+
+ current.update(value=default)
+ current.refresh()
+
+ if current.value != current.defaultValue:
+ raise F5ModuleError(
+ "Failed to reset the DB variable"
+ )
+
+ return True
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ key=dict(required=True),
+ state=dict(default='present', choices=['present', 'reset']),
+ value=dict(required=False, default=None)
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ try:
+ obj = BigIpSysDb(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_sys_global.py b/network/f5/bigip_sys_global.py
new file mode 100644
index 00000000000..7e6cfd78064
--- /dev/null
+++ b/network/f5/bigip_sys_global.py
@@ -0,0 +1,430 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_sys_global
+short_description: Manage BIG-IP global settings.
+description:
+ - Manage BIG-IP global settings.
+version_added: "2.3"
+options:
+ banner_text:
+ description:
+ - Specifies the text to present in the advisory banner.
+ console_timeout:
+ description:
+ - Specifies the number of seconds of inactivity before the system logs
+ off a user that is logged on.
+ gui_setup:
+ description:
+ - Whether the Setup utility in the browser-based Configuration
+ utility is C(enabled) or C(disabled).
+ choices:
+ - enabled
+ - disabled
+ lcd_display:
+ description:
+ - Specifies, when C(enabled), that the system menu displays on the
+ LCD screen on the front of the unit. This setting has no effect
+ when used on the VE platform.
+ choices:
+ - enabled
+ - disabled
+ mgmt_dhcp:
+ description:
+ - Specifies whether or not to enable the DHCP client on the
+ management interface.
+ choices:
+ - enabled
+ - disabled
+ net_reboot:
+ description:
+ - Specifies, when C(enabled), that the next time you reboot the system,
+ the system boots to an ISO image on the network, rather than an
+ internal media drive.
+ choices:
+ - enabled
+ - disabled
+ quiet_boot:
+ description:
+ - Specifies, when C(enabled), that the system suppresses informational
+ text on the console during the boot cycle. When C(disabled), the
+ system presents messages and informational text on the console during
+ the boot cycle.
+ security_banner:
+ description:
+ - Specifies whether the system displays an advisory message on the
+ login screen.
+ choices:
+ - enabled
+ - disabled
+ state:
+ description:
+ - The state of the variable on the system. When C(present), guarantees
+ that an existing variable is set to C(value).
+ required: false
+ default: present
+ choices:
+ - present
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Disable the setup utility
+ bigip_sys_global:
+ gui_setup: "disabled"
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ state: "present"
+ delegate_to: localhost
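+
+# A hedged sketch with illustrative values: several settings can be combined in
+# one task, and the module only pushes the ones that differ from the device.
+- name: Set a security banner and console timeout
+ bigip_sys_global:
+ security_banner: "enabled"
+ banner_text: "Authorized personnel only"
+ console_timeout: 600
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ state: "present"
+ delegate_to: localhost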
+'''
+
+RETURN = '''
+banner_text:
+ description: The new text to present in the advisory banner.
+ returned: changed
+ type: string
+ sample: "This is a corporate device. Do not touch."
+console_timeout:
+ description: >
+ The new number of seconds of inactivity before the system
+ logs off a user that is logged on.
+ returned: changed
+ type: integer
+ sample: 600
+gui_setup:
+ description: The new setting for the Setup utility.
+ returned: changed
+ type: string
+ sample: enabled
+lcd_display:
+ description: The new setting for displaying the system menu on the LCD.
+ returned: changed
+ type: string
+ sample: enabled
+mgmt_dhcp:
+ description: >
+ The new setting for whether the management interface should use DHCP
+ or not.
+ returned: changed
+ type: string
+ sample: enabled
+net_reboot:
+ description: >
+ The new setting for whether the system should boot to an ISO on the
+ network or not.
+ returned: changed
+ type: string
+ sample: enabled
+quiet_boot:
+ description: >
+ The new setting for whether the system should suppress information to
+ the console during boot or not.
+ returned: changed
+ type: string
+ sample: enabled
+security_banner:
+ description: >
+ The new setting for whether the system should display an advisory message
+ on the login screen or not.
+ returned: changed
+ type: string
+ sample: enabled
+'''
+
+try:
+ from f5.bigip.contexts import TransactionContextManager
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+class BigIpSysGlobalManager(object):
+ def __init__(self, *args, **kwargs):
+ self.changed_params = dict()
+ self.params = kwargs
+ self.api = None
+
+ def apply_changes(self):
+ result = dict()
+
+ changed = self.apply_to_running_config()
+
+ result.update(**self.changed_params)
+ result.update(dict(changed=changed))
+ return result
+
+ def apply_to_running_config(self):
+ try:
+ self.api = self.connect_to_bigip(**self.params)
+ return self.update_sys_global_settings()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ def connect_to_bigip(self, **kwargs):
+ return ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def read_sys_global_information(self):
+ settings = self.load_sys_global()
+ return self.format_sys_global_information(settings)
+
+ def load_sys_global(self):
+ return self.api.tm.sys.global_settings.load()
+
+ def get_changed_parameters(self):
+ result = dict()
+ current = self.read_sys_global_information()
+ if self.security_banner_is_changed(current):
+ result['guiSecurityBanner'] = self.params['security_banner']
+ if self.banner_text_is_changed(current):
+ result['guiSecurityBannerText'] = self.params['banner_text']
+ if self.gui_setup_is_changed(current):
+ result['guiSetup'] = self.params['gui_setup']
+ if self.lcd_display_is_changed(current):
+ result['lcdDisplay'] = self.params['lcd_display']
+ if self.mgmt_dhcp_is_changed(current):
+ result['mgmtDhcp'] = self.params['mgmt_dhcp']
+ if self.net_reboot_is_changed(current):
+ result['netReboot'] = self.params['net_reboot']
+ if self.quiet_boot_is_changed(current):
+ result['quietBoot'] = self.params['quiet_boot']
+ if self.console_timeout_is_changed(current):
+ result['consoleInactivityTimeout'] = self.params['console_timeout']
+ return result
+
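+ # Each *_is_changed helper below applies the same tri-state check: a
+ # parameter the user did not supply never triggers a change, a setting
+ # missing from the device's current configuration always does, and
+ # otherwise the desired and current values are compared directly.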
+ def security_banner_is_changed(self, current):
+ if self.params['security_banner'] is None:
+ return False
+ if 'security_banner' not in current:
+ return True
+ if self.params['security_banner'] == current['security_banner']:
+ return False
+ else:
+ return True
+
+ def banner_text_is_changed(self, current):
+ if self.params['banner_text'] is None:
+ return False
+ if 'banner_text' not in current:
+ return True
+ if self.params['banner_text'] == current['banner_text']:
+ return False
+ else:
+ return True
+
+ def gui_setup_is_changed(self, current):
+ if self.params['gui_setup'] is None:
+ return False
+ if 'gui_setup' not in current:
+ return True
+ if self.params['gui_setup'] == current['gui_setup']:
+ return False
+ else:
+ return True
+
+ def lcd_display_is_changed(self, current):
+ if self.params['lcd_display'] is None:
+ return False
+ if 'lcd_display' not in current:
+ return True
+ if self.params['lcd_display'] == current['lcd_display']:
+ return False
+ else:
+ return True
+
+ def mgmt_dhcp_is_changed(self, current):
+ if self.params['mgmt_dhcp'] is None:
+ return False
+ if 'mgmt_dhcp' not in current:
+ return True
+ if self.params['mgmt_dhcp'] == current['mgmt_dhcp']:
+ return False
+ else:
+ return True
+
+ def net_reboot_is_changed(self, current):
+ if self.params['net_reboot'] is None:
+ return False
+ if 'net_reboot' not in current:
+ return True
+ if self.params['net_reboot'] == current['net_reboot']:
+ return False
+ else:
+ return True
+
+ def quiet_boot_is_changed(self, current):
+ if self.params['quiet_boot'] is None:
+ return False
+ if 'quiet_boot' not in current:
+ return True
+ if self.params['quiet_boot'] == current['quiet_boot']:
+ return False
+ else:
+ return True
+
+ def console_timeout_is_changed(self, current):
+ if self.params['console_timeout'] is None:
+ return False
+ if 'console_timeout' not in current:
+ return True
+ if self.params['console_timeout'] == current['console_timeout']:
+ return False
+ else:
+ return True
+
+ def format_sys_global_information(self, settings):
+ result = dict()
+ if hasattr(settings, 'guiSecurityBanner'):
+ result['security_banner'] = str(settings.guiSecurityBanner)
+ if hasattr(settings, 'guiSecurityBannerText'):
+ result['banner_text'] = str(settings.guiSecurityBannerText)
+ if hasattr(settings, 'guiSetup'):
+ result['gui_setup'] = str(settings.guiSetup)
+ if hasattr(settings, 'lcdDisplay'):
+ result['lcd_display'] = str(settings.lcdDisplay)
+ if hasattr(settings, 'mgmtDhcp'):
+ result['mgmt_dhcp'] = str(settings.mgmtDhcp)
+ if hasattr(settings, 'netReboot'):
+ result['net_reboot'] = str(settings.netReboot)
+ if hasattr(settings, 'quietBoot'):
+ result['quiet_boot'] = str(settings.quietBoot)
+ if hasattr(settings, 'consoleInactivityTimeout'):
+ result['console_timeout'] = int(settings.consoleInactivityTimeout)
+ return result
+
+ def update_sys_global_settings(self):
+ params = self.get_changed_parameters()
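+ # An empty dict means the device already matches the requested settings.
+ # In check mode, report the pending change without touching the device.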
+ if params:
+ self.changed_params = camel_dict_to_snake_dict(params)
+ if self.params['check_mode']:
+ return True
+ else:
+ return False
+ self.update_sys_global_settings_on_device(params)
+ return True
+
+ def update_sys_global_settings_on_device(self, params):
+ tx = self.api.tm.transactions.transaction
+ with TransactionContextManager(tx) as api:
+ r = api.tm.sys.global_settings.load()
+ r.update(**params)
+
+
+class BigIpSysGlobalModuleConfig(object):
+ def __init__(self):
+ self.argument_spec = dict()
+ self.meta_args = dict()
+ self.supports_check_mode = True
+ self.states = ['present']
+ self.on_off_choices = ['enabled', 'disabled']
+
+ self.initialize_meta_args()
+ self.initialize_argument_spec()
+
+ def initialize_meta_args(self):
+ args = dict(
+ security_banner=dict(
+ required=False,
+ choices=self.on_off_choices,
+ default=None
+ ),
+ banner_text=dict(required=False, default=None),
+ gui_setup=dict(
+ required=False,
+ choices=self.on_off_choices,
+ default=None
+ ),
+ lcd_display=dict(
+ required=False,
+ choices=self.on_off_choices,
+ default=None
+ ),
+ mgmt_dhcp=dict(
+ required=False,
+ choices=self.on_off_choices,
+ default=None
+ ),
+ net_reboot=dict(
+ required=False,
+ choices=self.on_off_choices,
+ default=None
+ ),
+ quiet_boot=dict(
+ required=False,
+ choices=self.on_off_choices,
+ default=None
+ ),
+ console_timeout=dict(required=False, type='int', default=None),
+ state=dict(default='present', choices=['present'])
+ )
+ self.meta_args = args
+
+ def initialize_argument_spec(self):
+ self.argument_spec = f5_argument_spec()
+ self.argument_spec.update(self.meta_args)
+
+ def create(self):
+ return AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=self.supports_check_mode
+ )
+
+
+def main():
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ config = BigIpSysGlobalModuleConfig()
+ module = config.create()
+
+ try:
+ obj = BigIpSysGlobalManager(
+ check_mode=module.check_mode, **module.params
+ )
+ result = obj.apply_changes()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_virtual_server.py b/network/f5/bigip_virtual_server.py
new file mode 100644
index 00000000000..ddcf2cd0e6a
--- /dev/null
+++ b/network/f5/bigip_virtual_server.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Etienne Carriere
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_virtual_server
+short_description: "Manages F5 BIG-IP LTM virtual servers"
+description:
+ - "Manages F5 BIG-IP LTM virtual servers via iControl SOAP API"
+version_added: "2.1"
+author:
+ - Etienne Carriere (@Etienne-Carriere)
+ - Tim Rupp (@caphrim007)
+notes:
+ - "Requires BIG-IP software version >= 11"
+ - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+ - "Best run as a local_action in your playbook"
+requirements:
+ - bigsuds
+options:
+ state:
+ description:
+ - Virtual server state.
+ - C(absent), delete the VS if present.
+ - C(present) (and its synonym C(enabled)), create the VS if needed and
+ set its state to enabled.
+ - C(disabled), create the VS if needed and set its state to disabled.
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+ - enabled
+ - disabled
+ aliases: []
+ partition:
+ description:
+ - Partition
+ required: false
+ default: 'Common'
+ name:
+ description:
+ - Virtual server name
+ required: true
+ aliases:
+ - vs
+ destination:
+ description:
+ - Destination IP of the virtual server (only host is currently supported).
+ Required when state=present and vs does not exist.
+ required: true
+ aliases:
+ - address
+ - ip
+ port:
+ description:
+ - Port of the virtual server. Required when C(state) is C(present) and the VS does not exist.
+ required: false
+ default: None
+ all_profiles:
+ description:
+ - List of all profiles (HTTP, ClientSSL, ServerSSL, etc.) that must be
+ used by the virtual server
+ required: false
+ default: None
+ all_rules:
+ version_added: "2.2"
+ description:
+ - List of rules to be applied in priority order
+ required: false
+ default: None
+ enabled_vlans:
+ version_added: "2.2"
+ description:
+ - List of vlans to be enabled. When a VLAN named C(ALL) is used, all
+ VLANs will be allowed.
+ required: false
+ default: None
+ pool:
+ description:
+ - Default pool for the virtual server
+ required: false
+ default: None
+ snat:
+ description:
+ - Source network address policy
+ required: false
+ choices:
+ - None
+ - Automap
+ - Name of a SNAT pool (e.g. "/Common/snat_pool_name") to enable SNAT with the specified pool
+ default: None
+ default_persistence_profile:
+ description:
+ - Default Profile which manages the session persistence
+ required: false
+ default: None
+ route_advertisement_state:
+ description:
+ - Enable route advertisement for destination
+ required: false
+ default: disabled
+ version_added: "2.3"
+ description:
+ description:
+ - Virtual server description
+ required: false
+ default: None
+extends_documentation_fragment: f5
+'''
+
+EXAMPLES = '''
+- name: Add virtual server
+ bigip_virtual_server:
+ server: lb.mydomain.net
+ user: admin
+ password: secret
+ state: present
+ partition: MyPartition
+ name: myvirtualserver
+ destination: "{{ ansible_default_ipv4['address'] }}"
+ port: 443
+ pool: "{{ mypool }}"
+ snat: Automap
+ description: Test Virtual Server
+ all_profiles:
+ - http
+ - clientssl
+ enabled_vlans:
+ - /Common/vlan2
+ delegate_to: localhost
+
+- name: Modify Port of the Virtual Server
+ bigip_virtual_server:
+ server: lb.mydomain.net
+ user: admin
+ password: secret
+ state: present
+ partition: MyPartition
+ name: myvirtualserver
+ port: 8080
+ delegate_to: localhost
+
+- name: Delete virtual server
+ bigip_virtual_server:
+ server: lb.mydomain.net
+ user: admin
+ password: secret
+ state: absent
+ partition: MyPartition
+ name: myvirtualserver
+ delegate_to: localhost
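+
+# A hedged sketch with illustrative values: state 'disabled' creates the VS if
+# needed and leaves it administratively disabled.
+- name: Create virtual server in disabled state
+ bigip_virtual_server:
+ server: lb.mydomain.net
+ user: admin
+ password: secret
+ state: disabled
+ partition: MyPartition
+ name: myvirtualserver
+ destination: 10.0.0.10
+ port: 443
+ pool: mypool
+ delegate_to: localhost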
+'''
+
+RETURN = '''
+---
+deleted:
+ description: Name of a virtual server that was deleted
+ returned: changed
+ type: string
+ sample: "my-virtual-server"
+'''
+
+
+# map of state values
+STATES = {
+ 'enabled': 'STATE_ENABLED',
+ 'disabled': 'STATE_DISABLED'
+}
+
+STATUSES = {
+ 'enabled': 'SESSION_STATUS_ENABLED',
+ 'disabled': 'SESSION_STATUS_DISABLED',
+ 'offline': 'SESSION_STATUS_FORCED_DISABLED'
+}
+
+
+def vs_exists(api, vs):
+ # hack to determine if the virtual server exists
+ result = False
+ try:
+ api.LocalLB.VirtualServer.get_object_status(virtual_servers=[vs])
+ result = True
+ except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result = False
+ else:
+ # genuine exception
+ raise
+ return result
+
+
+def vs_create(api, name, destination, port, pool):
+ _profiles = [[{'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': 'tcp'}]]
+ created = False
+ # a bit of a hack to handle concurrent runs of this module.
+ # even though we've checked the vs doesn't exist,
+ # it may exist by the time we call create().
+ # this catches the exception and does something smart
+ # about it!
+ try:
+ api.LocalLB.VirtualServer.create(
+ definitions=[{'name': [name], 'address': [destination], 'port': port, 'protocol': 'PROTOCOL_TCP'}],
+ wildmasks=['255.255.255.255'],
+ resources=[{'type': 'RESOURCE_TYPE_POOL', 'default_pool_name': pool}],
+ profiles=_profiles)
+ created = True
+ return created
+ except bigsuds.OperationFailed as e:
+ if "already exists" not in str(e):
+ raise Exception('Error on creating Virtual Server : %s' % e)
+
+
+def vs_remove(api, name):
+ api.LocalLB.VirtualServer.delete_virtual_server(
+ virtual_servers=[name]
+ )
+
+
+def get_rules(api, name):
+ return api.LocalLB.VirtualServer.get_rule(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_rules(api, name, rules_list):
+ updated = False
+ if rules_list is None:
+ return False
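+ # Pair each rule with its list index; the index becomes the rule's
+ # priority on the virtual server, so the order given in the task matters.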
+ rules_list = list(enumerate(rules_list))
+ try:
+ current_rules = list(map(lambda x: (x['priority'], x['rule_name']), get_rules(api, name)))
+ to_add_rules = []
+ for i, x in rules_list:
+ if (i, x) not in current_rules:
+ to_add_rules.append({'priority': i, 'rule_name': x})
+ to_del_rules = []
+ for i, x in current_rules:
+ if (i, x) not in rules_list:
+ to_del_rules.append({'priority': i, 'rule_name': x})
+ if len(to_del_rules) > 0:
+ api.LocalLB.VirtualServer.remove_rule(
+ virtual_servers=[name],
+ rules=[to_del_rules]
+ )
+ updated = True
+ if len(to_add_rules) > 0:
+ api.LocalLB.VirtualServer.add_rule(
+ virtual_servers=[name],
+ rules=[to_add_rules]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting rules : %s' % e)
+
+
+def get_profiles(api, name):
+ return api.LocalLB.VirtualServer.get_profile(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_profiles(api, name, profiles_list):
+ updated = False
+ try:
+ if profiles_list is None:
+ return False
+ current_profiles = list(map(lambda x: x['profile_name'], get_profiles(api, name)))
+ to_add_profiles = []
+ for x in profiles_list:
+ if x not in current_profiles:
+ to_add_profiles.append({'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': x})
+ to_del_profiles = []
+ for x in current_profiles:
+ if (x not in profiles_list) and (x != "/Common/tcp"):
+ to_del_profiles.append({'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': x})
+ if len(to_del_profiles) > 0:
+ api.LocalLB.VirtualServer.remove_profile(
+ virtual_servers=[name],
+ profiles=[to_del_profiles]
+ )
+ updated = True
+ if len(to_add_profiles) > 0:
+ api.LocalLB.VirtualServer.add_profile(
+ virtual_servers=[name],
+ profiles=[to_add_profiles]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting profiles : %s' % e)
+
+
+def get_vlan(api, name):
+ return api.LocalLB.VirtualServer.get_vlan(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_enabled_vlans(api, name, vlans_enabled_list):
+ updated = False
+ to_add_vlans = []
+ try:
+ if vlans_enabled_list is None:
+ return updated
+ current_vlans = get_vlan(api, name)
+
+ # Set allowed list back to default ("all")
+ #
+ # This case allows you to undo what you may have previously done.
+ # The default case is "All VLANs and Tunnels". This case will handle
+ # that situation.
+ if 'ALL' in vlans_enabled_list:
+ # The user is coming from a situation where they previously
+ # were specifying a list of allowed VLANs
+ if len(current_vlans['vlans']) > 0 or \
+ current_vlans['state'] == "STATE_ENABLED":
+ api.LocalLB.VirtualServer.set_vlan(
+ virtual_servers=[name],
+ vlans=[{'state': 'STATE_DISABLED', 'vlans': []}]
+ )
+ updated = True
+ else:
+ if current_vlans['state'] == "STATE_DISABLED":
+ to_add_vlans = vlans_enabled_list
+ else:
+ for vlan in vlans_enabled_list:
+ if vlan not in current_vlans['vlans']:
+ updated = True
+ to_add_vlans = vlans_enabled_list
+ break
+ if updated:
+ api.LocalLB.VirtualServer.set_vlan(
+ virtual_servers=[name],
+ vlans=[{
+ 'state': 'STATE_ENABLED',
+ 'vlans': [to_add_vlans]
+ }]
+ )
+
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting enabled vlans : %s' % e)
+
+
+def set_snat(api, name, snat):
+ updated = False
+ try:
+ current_state = get_snat_type(api, name)
+ current_snat_pool = get_snat_pool(api, name)
+ if snat is None:
+ return updated
+ elif snat == 'None' and current_state != 'SRC_TRANS_NONE':
+ api.LocalLB.VirtualServer.set_source_address_translation_none(
+ virtual_servers=[name]
+ )
+ updated = True
+ elif snat == 'Automap' and current_state != 'SRC_TRANS_AUTOMAP':
+ api.LocalLB.VirtualServer.set_source_address_translation_automap(
+ virtual_servers=[name]
+ )
+ updated = True
+ elif snat_settings_need_updating(snat, current_state, current_snat_pool):
+ api.LocalLB.VirtualServer.set_source_address_translation_snat_pool(
+ virtual_servers=[name],
+ pools=[snat]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting snat : %s' % e)
+
+
+def get_snat_type(api, name):
+ return api.LocalLB.VirtualServer.get_source_address_translation_type(
+ virtual_servers=[name]
+ )[0]
+
+
+def get_snat_pool(api, name):
+ return api.LocalLB.VirtualServer.get_source_address_translation_snat_pool(
+ virtual_servers=[name]
+ )[0]
+
+
+def snat_settings_need_updating(snat, current_state, current_snat_pool):
+ if snat == 'None' or snat == 'Automap':
+ return False
+ elif snat and current_state != 'SRC_TRANS_SNATPOOL':
+ return True
+ elif snat and current_state == 'SRC_TRANS_SNATPOOL' and current_snat_pool != snat:
+ return True
+ else:
+ return False
+
+
+def get_pool(api, name):
+ return api.LocalLB.VirtualServer.get_default_pool_name(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_pool(api, name, pool):
+ updated = False
+ try:
+ current_pool = get_pool(api, name)
+ if pool is not None and (pool != current_pool):
+ api.LocalLB.VirtualServer.set_default_pool_name(
+ virtual_servers=[name],
+ default_pools=[pool]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting pool : %s' % e)
+
+
+def get_destination(api, name):
+ return api.LocalLB.VirtualServer.get_destination_v2(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_destination(api, name, destination):
+ updated = False
+ try:
+ current_destination = get_destination(api, name)
+ if destination is not None and destination != current_destination['address']:
+ api.LocalLB.VirtualServer.set_destination_v2(
+ virtual_servers=[name],
+ destinations=[{'address': destination, 'port': current_destination['port']}]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting destination : %s' % e)
+
+
+def set_port(api, name, port):
+ updated = False
+ try:
+ current_destination = get_destination(api, name)
+ if port is not None and port != current_destination['port']:
+ api.LocalLB.VirtualServer.set_destination_v2(
+ virtual_servers=[name],
+ destinations=[{'address': current_destination['address'], 'port': port}]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting port : %s' % e)
+
+
+def get_state(api, name):
+ return api.LocalLB.VirtualServer.get_enabled_state(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_state(api, name, state):
+ updated = False
+ try:
+ current_state = get_state(api, name)
+ # We consider that being present is equivalent to enabled
+ if state == 'present':
+ state = 'enabled'
+ if STATES[state] != current_state:
+ api.LocalLB.VirtualServer.set_enabled_state(
+ virtual_servers=[name],
+ states=[STATES[state]]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting state : %s' % e)
+
+
+def get_description(api, name):
+ return api.LocalLB.VirtualServer.get_description(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_description(api, name, description):
+ updated = False
+ try:
+ current_description = get_description(api, name)
+ if description is not None and current_description != description:
+ api.LocalLB.VirtualServer.set_description(
+ virtual_servers=[name],
+ descriptions=[description]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting description : %s ' % e)
+
+
+def get_persistence_profiles(api, name):
+ return api.LocalLB.VirtualServer.get_persistence_profile(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_default_persistence_profiles(api, name, persistence_profile):
+ updated = False
+ if persistence_profile is None:
+ return updated
+ try:
+ current_persistence_profiles = get_persistence_profiles(api, name)
+ default = None
+ for profile in current_persistence_profiles:
+ if profile['default_profile']:
+ default = profile['profile_name']
+ break
+ if default is not None and default != persistence_profile:
+ api.LocalLB.VirtualServer.remove_persistence_profile(
+ virtual_servers=[name],
+ profiles=[[{'profile_name': default, 'default_profile': True}]]
+ )
+ if default != persistence_profile:
+ api.LocalLB.VirtualServer.add_persistence_profile(
+ virtual_servers=[name],
+ profiles=[[{'profile_name': persistence_profile, 'default_profile': True}]]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting default persistence profile : %s' % e)
+
+
+def get_route_advertisement_status(api, address):
+ result = api.LocalLB.VirtualAddressV2.get_route_advertisement_state(virtual_addresses=[address]).pop(0)
+ result = result.split("STATE_")[-1].lower()
+ return result
+
+
+def set_route_advertisement_state(api, destination, partition, route_advertisement_state):
+ updated = False
+
+ try:
+ state = "STATE_%s" % route_advertisement_state.strip().upper()
+ address = fq_name(partition, destination)
+ current_route_advertisement_state = get_route_advertisement_status(api, address)
+ if current_route_advertisement_state != route_advertisement_state:
+ api.LocalLB.VirtualAddressV2.set_route_advertisement_state(virtual_addresses=[address], states=[state])
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting route advertisement state : %s' % e)
+
+
+def main():
+ argument_spec = f5_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'disabled', 'enabled']),
+ name=dict(type='str', required=True, aliases=['vs']),
+ destination=dict(type='str', aliases=['address', 'ip']),
+ port=dict(type='int'),
+ all_profiles=dict(type='list'),
+ all_rules=dict(type='list'),
+ enabled_vlans=dict(type='list'),
+ pool=dict(type='str'),
+ description=dict(type='str'),
+ snat=dict(type='str'),
+ route_advertisement_state=dict(type='str', default='disabled', choices=['enabled', 'disabled']),
+ default_persistence_profile=dict(type='str')
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not bigsuds_found:
+ module.fail_json(msg="the python bigsuds module is required")
+
+ if module.params['validate_certs']:
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
+ server = module.params['server']
+ server_port = module.params['server_port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ partition = module.params['partition']
+ validate_certs = module.params['validate_certs']
+
+ name = fq_name(partition, module.params['name'])
+ destination = module.params['destination']
+ port = module.params['port']
+ all_profiles = fq_list_names(partition, module.params['all_profiles'])
+ all_rules = fq_list_names(partition, module.params['all_rules'])
+
+ enabled_vlans = module.params['enabled_vlans']
+ if enabled_vlans is None or 'ALL' in enabled_vlans:
+ all_enabled_vlans = enabled_vlans
+ else:
+ all_enabled_vlans = fq_list_names(partition, enabled_vlans)
+
+ pool = fq_name(partition, module.params['pool'])
+ description = module.params['description']
+ snat = module.params['snat']
+ route_advertisement_state = module.params['route_advertisement_state']
+ default_persistence_profile = fq_name(partition, module.params['default_persistence_profile'])
+
+ if port is not None and not (1 <= port <= 65535):
+ module.fail_json(msg="valid ports must be in range 1 - 65535")
+
+ try:
+ api = bigip_api(server, user, password, validate_certs, port=server_port)
+ result = {'changed': False} # default
+
+ if state == 'absent':
+ if not module.check_mode:
+ if vs_exists(api, name):
+ # hack to handle concurrent runs of module
+ # pool might be gone before we actually remove
+ try:
+ vs_remove(api, name)
+ result = {'changed': True, 'deleted': name}
+ except bigsuds.OperationFailed as e:
+ if "was not found" in str(e):
+ result['changed'] = False
+ else:
+ raise
+ else:
+ # check-mode return value
+ result = {'changed': True}
+
+ else:
+ update = False
+ if not vs_exists(api, name):
+ if (not destination) or (not port):
+ module.fail_json(msg="both destination and port must be supplied to create a VS")
+ if not module.check_mode:
+ # a bit of a hack to handle concurrent runs of this module.
+ # even though we've checked the virtual_server doesn't exist,
+ # it may exist by the time we run vs_create().
+ # this catches the exception and does something smart
+ # about it!
+ try:
+ vs_create(api, name, destination, port, pool)
+ set_profiles(api, name, all_profiles)
+ set_enabled_vlans(api, name, all_enabled_vlans)
+ set_rules(api, name, all_rules)
+ set_snat(api, name, snat)
+ set_description(api, name, description)
+ set_default_persistence_profiles(api, name, default_persistence_profile)
+ set_state(api, name, state)
+ set_route_advertisement_state(api, destination, partition, route_advertisement_state)
+ result = {'changed': True}
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on creating Virtual Server : %s' % e)
+ else:
+ # check-mode return value
+ result = {'changed': True}
+ else:
+ update = True
+ if update:
+ # VS exists
+ if not module.check_mode:
+ # Have a transaction for all the changes
+ try:
+ api.System.Session.start_transaction()
+ result['changed'] |= set_destination(api, name, fq_name(partition, destination))
+ result['changed'] |= set_port(api, name, port)
+ result['changed'] |= set_pool(api, name, pool)
+ result['changed'] |= set_description(api, name, description)
+ result['changed'] |= set_snat(api, name, snat)
+ result['changed'] |= set_profiles(api, name, all_profiles)
+ result['changed'] |= set_enabled_vlans(api, name, all_enabled_vlans)
+ result['changed'] |= set_rules(api, name, all_rules)
+ result['changed'] |= set_default_persistence_profiles(api, name, default_persistence_profile)
+ result['changed'] |= set_state(api, name, state)
+ result['changed'] |= set_route_advertisement_state(api, destination, partition, route_advertisement_state)
+ api.System.Session.submit_transaction()
+ except Exception as e:
+ raise Exception("Error on updating Virtual Server : %s" % e)
+ else:
+ # check-mode return value
+ result = {'changed': True}
+
+ except Exception as e:
+ module.fail_json(msg="received exception: %s" % e)
+
+ module.exit_json(**result)
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/f5/bigip_vlan.py b/network/f5/bigip_vlan.py
new file mode 100644
index 00000000000..40df948f6c6
--- /dev/null
+++ b/network/f5/bigip_vlan.py
@@ -0,0 +1,451 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_vlan
+short_description: Manage VLANs on a BIG-IP system
+description:
+ - Manage VLANs on a BIG-IP system
+version_added: "2.2"
+options:
+ description:
+ description:
+ - The description to give to the VLAN.
+ tagged_interfaces:
+ description:
+ - Specifies a list of tagged interfaces and trunks that you want to
+ configure for the VLAN. Use tagged interfaces or trunks when
+ you want to assign a single interface or trunk to multiple VLANs.
+ required: false
+ aliases:
+ - tagged_interface
+ untagged_interfaces:
+ description:
+ - Specifies a list of untagged interfaces and trunks that you want to
+ configure for the VLAN.
+ required: false
+ aliases:
+ - untagged_interface
+ name:
+ description:
+ - The VLAN to manage. If the special VLAN C(ALL) is specified with
+ the C(state) value of C(absent) then all VLANs will be removed.
+ required: true
+ state:
+ description:
+ - The state of the VLAN on the system. When C(present), guarantees
+ that the VLAN exists with the provided attributes. When C(absent),
+ removes the VLAN from the system.
+ required: false
+ default: present
+ choices:
+ - absent
+ - present
+ tag:
+ description:
+ - Tag number for the VLAN. The tag number can be any integer between 1
+ and 4094. The system automatically assigns a tag number if you do not
+ specify a value.
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+ - Requires BIG-IP versions >= 12.0.0
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Create VLAN
+ bigip_vlan:
+ name: "net1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
+
+- name: Set VLAN tag
+ bigip_vlan:
+ name: "net1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ tag: "2345"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
+
+- name: Add VLAN 2345 as tagged to interface 1.1
+ bigip_vlan:
+ tagged_interface: 1.1
+ name: "net1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ tag: "2345"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
+
+- name: Add VLAN 1234 as tagged to interfaces 1.1 and 1.2
+ bigip_vlan:
+ tagged_interfaces:
+ - 1.1
+ - 1.2
+ name: "net1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ tag: "1234"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost
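+
+# Illustrative values: the same pattern applies to untagged interfaces via the
+# untagged_interfaces option documented above (mutually exclusive with
+# tagged_interfaces).
+- name: Add VLAN 1234 as untagged to interface 1.3
+ bigip_vlan:
+ untagged_interfaces:
+ - 1.3
+ name: "net1"
+ password: "secret"
+ server: "lb.mydomain.com"
+ tag: "1234"
+ user: "admin"
+ validate_certs: "no"
+ delegate_to: localhost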
+'''
+
+RETURN = '''
+description:
+ description: The description set on the VLAN
+ returned: changed
+ type: string
+ sample: foo VLAN
+interfaces:
+ description: Interfaces that the VLAN is assigned to
+ returned: changed
+ type: list
+ sample: ['1.1','1.2']
+name:
+ description: The name of the VLAN
+ returned: changed
+ type: string
+ sample: net1
+partition:
+ description: The partition that the VLAN was created on
+ returned: changed
+ type: string
+ sample: Common
+tag:
+ description: The ID of the VLAN
+ returned: changed
+ type: int
+ sample: 2345
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+class BigIpVlan(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ # The params that change in the module
+ self.cparams = dict()
+
+ # Stores the params that are sent to the module
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def present(self):
+ if self.exists():
+ return self.update()
+ else:
+ return self.create()
+
+ def absent(self):
+ changed = False
+
+ if self.exists():
+ changed = self.delete()
+
+ return changed
+
+ def read(self):
+ """Read information and transform it
+
+ The values that are returned by BIG-IP in the f5-sdk can have encoding
+ attached to them as well as be completely missing in some cases.
+
+ Therefore, this method will transform the data from the BIG-IP into a
+ format that is more easily consumable by the rest of the class and the
+ parameters that are supported by the module.
+ """
+ p = dict()
+ name = self.params['name']
+ partition = self.params['partition']
+ r = self.api.tm.net.vlans.vlan.load(
+ name=name,
+ partition=partition
+ )
+ ifcs = r.interfaces_s.get_collection()
+ if hasattr(r, 'tag'):
+ p['tag'] = int(r.tag)
+ if hasattr(r, 'description'):
+ p['description'] = str(r.description)
+ if len(ifcs) != 0:
+ untagged = []
+ tagged = []
+ for x in ifcs:
+ if hasattr(x, 'tagged'):
+ tagged.append(str(x.name))
+ elif hasattr(x, 'untagged'):
+ untagged.append(str(x.name))
+ if untagged:
+ p['untagged_interfaces'] = list(set(untagged))
+ if tagged:
+ p['tagged_interfaces'] = list(set(tagged))
+ p['name'] = name
+ return p
+
+ def create(self):
+ params = dict()
+
+ check_mode = self.params['check_mode']
+ description = self.params['description']
+ name = self.params['name']
+ untagged_interfaces = self.params['untagged_interfaces']
+ tagged_interfaces = self.params['tagged_interfaces']
+ partition = self.params['partition']
+ tag = self.params['tag']
+
+ if tag is not None:
+ params['tag'] = tag
+
+ if untagged_interfaces is not None or tagged_interfaces is not None:
+ tmp = []
+ ifcs = self.api.tm.net.interfaces.get_collection()
+ ifcs = [str(x.name) for x in ifcs]
+
+ if len(ifcs) == 0:
+ raise F5ModuleError(
+ 'No interfaces were found'
+ )
+
+ pinterfaces = []
+ if untagged_interfaces:
+ interfaces = untagged_interfaces
+ elif tagged_interfaces:
+ interfaces = tagged_interfaces
+
+ for ifc in interfaces:
+ ifc = str(ifc)
+ if ifc in ifcs:
+ pinterfaces.append(ifc)
+
+ if tagged_interfaces:
+ tmp = [dict(name=x, tagged=True) for x in pinterfaces]
+ elif untagged_interfaces:
+ tmp = [dict(name=x, untagged=True) for x in pinterfaces]
+
+ if tmp:
+ params['interfaces'] = tmp
+
+ if description is not None:
+ params['description'] = self.params['description']
+
+ params['name'] = name
+ params['partition'] = partition
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return True
+
+ d = self.api.tm.net.vlans.vlan
+ d.create(**params)
+
+ if self.exists():
+ return True
+ else:
+ raise F5ModuleError("Failed to create the VLAN")
+
+ def update(self):
+ changed = False
+ params = dict()
+ current = self.read()
+
+ check_mode = self.params['check_mode']
+ description = self.params['description']
+ name = self.params['name']
+ tag = self.params['tag']
+ partition = self.params['partition']
+ tagged_interfaces = self.params['tagged_interfaces']
+ untagged_interfaces = self.params['untagged_interfaces']
+
+ if untagged_interfaces is not None or tagged_interfaces is not None:
+ ifcs = self.api.tm.net.interfaces.get_collection()
+ ifcs = [str(x.name) for x in ifcs]
+
+ if len(ifcs) == 0:
+ raise F5ModuleError(
+ 'No interfaces were found'
+ )
+
+ pinterfaces = []
+ if untagged_interfaces:
+ interfaces = untagged_interfaces
+ elif tagged_interfaces:
+ interfaces = tagged_interfaces
+
+ for ifc in interfaces:
+ ifc = str(ifc)
+ if ifc in ifcs:
+ pinterfaces.append(ifc)
+ else:
+ raise F5ModuleError(
+ 'The specified interface "%s" was not found' % (ifc)
+ )
+
+ if tagged_interfaces:
+ tmp = [dict(name=x, tagged=True) for x in pinterfaces]
+ if 'tagged_interfaces' in current:
+ if pinterfaces != current['tagged_interfaces']:
+ params['interfaces'] = tmp
+ else:
+ params['interfaces'] = tmp
+ elif untagged_interfaces:
+ tmp = [dict(name=x, untagged=True) for x in pinterfaces]
+ if 'untagged_interfaces' in current:
+ if pinterfaces != current['untagged_interfaces']:
+ params['interfaces'] = tmp
+ else:
+ params['interfaces'] = tmp
+
+ if description is not None:
+ if 'description' in current:
+ if description != current['description']:
+ params['description'] = description
+ else:
+ params['description'] = description
+
+ if tag is not None:
+ if 'tag' in current:
+ if tag != current['tag']:
+ params['tag'] = tag
+ else:
+ params['tag'] = tag
+
+ if params:
+ changed = True
+ params['name'] = name
+ params['partition'] = partition
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return changed
+ else:
+ return changed
+
+ r = self.api.tm.net.vlans.vlan.load(
+ name=name,
+ partition=partition
+ )
+ r.update(**params)
+ r.refresh()
+
+ return True
+
+ def delete(self):
+ params = dict()
+ check_mode = self.params['check_mode']
+
+ params['name'] = self.params['name']
+ params['partition'] = self.params['partition']
+
+ self.cparams = camel_dict_to_snake_dict(params)
+ if check_mode:
+ return True
+
+ dc = self.api.tm.net.vlans.vlan.load(**params)
+ dc.delete()
+
+ if self.exists():
+ raise F5ModuleError("Failed to delete the VLAN")
+ return True
+
+ def exists(self):
+ name = self.params['name']
+ partition = self.params['partition']
+ return self.api.tm.net.vlans.vlan.exists(
+ name=name,
+ partition=partition
+ )
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+
+ try:
+ if state == "present":
+ changed = self.present()
+ elif state == "absent":
+ changed = self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ description=dict(required=False, default=None),
+ tagged_interfaces=dict(required=False, default=None, type='list', aliases=['tagged_interface']),
+ untagged_interfaces=dict(required=False, default=None, type='list', aliases=['untagged_interface']),
+ name=dict(required=True),
+ tag=dict(required=False, default=None, type='int')
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['tagged_interfaces', 'untagged_interfaces']
+ ]
+ )
+
+ try:
+ obj = BigIpVlan(check_mode=module.check_mode, **module.params)
+ result = obj.flush()
+
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/haproxy.py b/network/haproxy.py
index cada704e342..5ee3006629e 100644
--- a/network/haproxy.py
+++ b/network/haproxy.py
@@ -18,11 +18,16 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: haproxy
version_added: "1.9"
short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands.
+author: "Ravi Bhure (@ravibhure)"
description:
- Enable, disable, and set weights for HAProxy backend servers using socket
commands.
@@ -60,6 +65,12 @@
required: true
default: null
choices: [ "enabled", "disabled" ]
+ fail_on_not_found:
+ description:
+ - Fail whenever trying to enable/disable a backend host that does not exist
+ required: false
+ default: false
+ version_added: "2.2"
wait:
description:
- Wait until the server reports a status of 'UP' when `state=enabled`, or
@@ -91,38 +102,80 @@
EXAMPLES = '''
# disable server in 'www' backend pool
-- haproxy: state=disabled host={{ inventory_hostname }} backend=www
+- haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ backend: www
# disable server without backend pool name (applies to all available backend pools)
-- haproxy: state=disabled host={{ inventory_hostname }}
+- haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
# disable server, provide socket file
-- haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www
+- haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
# disable server, provide socket file, wait until status reports in maintenance
-- haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www wait=yes
+- haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+ wait: yes
# disable backend server in 'www' backend pool and drop open sessions to it
-- haproxy: state=disabled host={{ inventory_hostname }} backend=www socket=/var/run/haproxy.sock shutdown_sessions=true
+- haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ socket: /var/run/haproxy.sock
+ shutdown_sessions: true
+
+# disable server without backend pool name (applies to all available backend pools) but fail when the backend host is not found
+- haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ fail_on_not_found: yes
# enable server in 'www' backend pool
-- haproxy: state=enabled host={{ inventory_hostname }} backend=www
+- haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
# enable server in 'www' backend pool wait until healthy
-- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes
+- haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ wait: yes
# enable server in 'www' backend pool, wait until healthy, retrying 10 times at 5-second intervals
-- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes wait_retries=10 wait_interval=5
+- haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ wait: yes
+ wait_retries: 10
+ wait_interval: 5
# enable server in 'www' backend pool with change server(s) weight
-- haproxy: state=enabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock weight=10 backend=www
-
-author: "Ravi Bhure (@ravibhure)"
+- haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ weight: 10
+ backend: www
'''
import socket
import csv
import time
+from string import Template
DEFAULT_SOCKET_LOCATION="/var/run/haproxy.sock"
@@ -156,17 +209,17 @@ def __init__(self, module):
self.weight = self.module.params['weight']
self.socket = self.module.params['socket']
self.shutdown_sessions = self.module.params['shutdown_sessions']
+ self.fail_on_not_found = self.module.params['fail_on_not_found']
self.wait = self.module.params['wait']
self.wait_retries = self.module.params['wait_retries']
self.wait_interval = self.module.params['wait_interval']
- self.command_results = []
+ self.command_results = {}
def execute(self, cmd, timeout=200, capture_output=True):
"""
Executes a HAProxy command by sending a message to a HAProxy's local
UNIX socket and waiting up to 'timeout' milliseconds for the response.
"""
-
self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.client.connect(self.socket)
self.client.sendall('%s\n' % cmd)
@@ -177,10 +230,67 @@ def execute(self, cmd, timeout=200, capture_output=True):
result += buf
buf = self.client.recv(RECV_SIZE)
if capture_output:
- self.command_results = result.strip()
+ self.capture_command_output(cmd, result.strip())
self.client.close()
return result
+
+ def capture_command_output(self, cmd, output):
+ """
+ Capture the output for a command
+ """
+ if 'command' not in self.command_results:
+ self.command_results['command'] = []
+ self.command_results['command'].append(cmd)
+ if 'output' not in self.command_results:
+ self.command_results['output'] = []
+ self.command_results['output'].append(output)
+
+
+ def discover_all_backends(self):
+ """
+ Discover all entries with svname = 'BACKEND' and return a list of their corresponding
+ pxnames
+ """
+ data = self.execute('show stat', 200, False).lstrip('# ')
+ r = csv.DictReader(data.splitlines())
+ return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r)))
+
+
+ def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None):
+ """
+ Run some command on the specified backends. If no backends are provided they will
+ be discovered automatically (all backends)
+ """
+ # Discover backends if none are given
+ if pxname is None:
+ backends = self.discover_all_backends()
+ else:
+ backends = [pxname]
+
+ # Run the command for each requested backend
+ for backend in backends:
+ # Fail when backends were not found
+ state = self.get_state_for(backend, svname)
+ if (self.fail_on_not_found or self.wait) and state is None:
+ self.module.fail_json(msg="The specified backend '%s/%s' was not found!" % (backend, svname))
+
+ self.execute(Template(cmd).substitute(pxname=backend, svname=svname))
+ if self.wait:
+ self.wait_until_status(backend, svname, wait_for_status)
+
+
+ def get_state_for(self, pxname, svname):
+ """
+ Find the state of specific services. When pxname is not set, get all backends for a specific host.
+ Returns a tuple of dictionaries containing the status and weight for those services, or None when nothing matches.
+ """
+ data = self.execute('show stat', 200, False).lstrip('# ')
+ r = csv.DictReader(data.splitlines())
+ state = tuple(map(lambda d: { 'status': d['status'], 'weight': d['weight'] }, filter(lambda d: (pxname is None or d['pxname'] == pxname) and d['svname'] == svname, r)))
+ return state or None
+
+
def wait_until_status(self, pxname, svname, status):
"""
Wait for a service to reach the specified status. Try RETRIES times
@@ -189,55 +299,28 @@ def wait_until_status(self, pxname, svname, status):
not found, the module will fail.
"""
for i in range(1, self.wait_retries):
- data = self.execute('show stat', 200, False).lstrip('# ')
- r = csv.DictReader(data.splitlines())
- found = False
- for row in r:
- if row['pxname'] == pxname and row['svname'] == svname:
- found = True
- if row['status'] == status:
- return True;
- else:
- time.sleep(self.wait_interval)
-
- if not found:
- self.module.fail_json(msg="unable to find server %s/%s" % (pxname, svname))
+ state = self.get_state_for(pxname, svname)
+
+ # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here
+ if state[0]['status'] == status:
+ return True
+ else:
+ time.sleep(self.wait_interval)
self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." % (pxname, svname, status, self.wait_retries))
+
def enabled(self, host, backend, weight):
"""
Enabled action, marks server to UP and checks are re-enabled,
also supports to get current weight for server (default) and
set the weight for haproxy backend server when provides.
"""
- svname = host
- if self.backend is None:
- output = self.execute('show stat')
- #sanitize and make a list of lines
- output = output.lstrip('# ').strip()
- output = output.split('\n')
- result = output
-
- for line in result:
- if 'BACKEND' in line:
- result = line.split(',')[0]
- pxname = result
- cmd = "get weight %s/%s ; enable server %s/%s" % (pxname, svname, pxname, svname)
- if weight:
- cmd += "; set weight %s/%s %s" % (pxname, svname, weight)
- self.execute(cmd)
- if self.wait:
- self.wait_until_status(pxname, svname, 'UP')
+ cmd = "get weight $pxname/$svname; enable server $pxname/$svname"
+ if weight:
+ cmd += "; set weight $pxname/$svname %s" % weight
+ self.execute_for_backends(cmd, backend, host, 'UP')
- else:
- pxname = backend
- cmd = "get weight %s/%s ; enable server %s/%s" % (pxname, svname, pxname, svname)
- if weight:
- cmd += "; set weight %s/%s %s" % (pxname, svname, weight)
- self.execute(cmd)
- if self.wait:
- self.wait_until_status(pxname, svname, 'UP')
def disabled(self, host, backend, shutdown_sessions):
"""
@@ -245,50 +328,40 @@ def disabled(self, host, backend, shutdown_sessions):
performed on the server until it leaves maintenance,
also it shutdown sessions while disabling backend host server.
"""
- svname = host
- if self.backend is None:
- output = self.execute('show stat')
- #sanitize and make a list of lines
- output = output.lstrip('# ').strip()
- output = output.split('\n')
- result = output
-
- for line in result:
- if 'BACKEND' in line:
- result = line.split(',')[0]
- pxname = result
- cmd = "get weight %s/%s ; disable server %s/%s" % (pxname, svname, pxname, svname)
- if shutdown_sessions:
- cmd += "; shutdown sessions server %s/%s" % (pxname, svname)
- self.execute(cmd)
- if self.wait:
- self.wait_until_status(pxname, svname, 'MAINT')
+ cmd = "get weight $pxname/$svname; disable server $pxname/$svname"
+ if shutdown_sessions:
+ cmd += "; shutdown sessions server $pxname/$svname"
+ self.execute_for_backends(cmd, backend, host, 'MAINT')
- else:
- pxname = backend
- cmd = "get weight %s/%s ; disable server %s/%s" % (pxname, svname, pxname, svname)
- if shutdown_sessions:
- cmd += "; shutdown sessions server %s/%s" % (pxname, svname)
- self.execute(cmd)
- if self.wait:
- self.wait_until_status(pxname, svname, 'MAINT')
def act(self):
"""
Figure out what you want to do from ansible, and then do it.
"""
+ # Get the state before the run
+ state_before = self.get_state_for(self.backend, self.host)
+ self.command_results['state_before'] = state_before
# toggle enable/disable server
if self.state == 'enabled':
self.enabled(self.host, self.backend, self.weight)
-
elif self.state == 'disabled':
self.disabled(self.host, self.backend, self.shutdown_sessions)
-
else:
self.module.fail_json(msg="unknown state specified: '%s'" % self.state)
- self.module.exit_json(stdout=self.command_results, changed=True)
+ # Get the state after the run
+ state_after = self.get_state_for(self.backend, self.host)
+ self.command_results['state_after'] = state_after
+
+ # Report change status
+ if state_before != state_after:
+ self.command_results['changed'] = True
+ self.module.exit_json(**self.command_results)
+ else:
+ self.command_results['changed'] = False
+ self.module.exit_json(**self.command_results)
+
def main():
@@ -300,12 +373,12 @@ def main():
backend=dict(required=False, default=None),
weight=dict(required=False, default=None),
socket = dict(required=False, default=DEFAULT_SOCKET_LOCATION),
- shutdown_sessions=dict(required=False, default=False),
+ shutdown_sessions=dict(required=False, default=False, type='bool'),
+ fail_on_not_found=dict(required=False, default=False, type='bool'),
wait=dict(required=False, default=False, type='bool'),
wait_retries=dict(required=False, default=WAIT_RETRIES, type='int'),
wait_interval=dict(required=False, default=WAIT_INTERVAL, type='int'),
),
-
)
if not socket:
@@ -317,4 +390,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/network/illumos/__init__.py b/network/illumos/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/network/illumos/dladm_etherstub.py b/network/illumos/dladm_etherstub.py
new file mode 100644
index 00000000000..861e0a70131
--- /dev/null
+++ b/network/illumos/dladm_etherstub.py
@@ -0,0 +1,181 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: dladm_etherstub
+short_description: Manage etherstubs on Solaris/illumos systems.
+description:
+ - Create or delete etherstubs on Solaris/illumos systems.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - Etherstub name.
+ required: true
+ temporary:
+ description:
+ - Specifies that the etherstub is temporary. Temporary etherstubs
+ do not persist across reboots.
+ required: false
+ default: false
+ choices: [ "true", "false" ]
+ state:
+ description:
+ - Create or delete Solaris/illumos etherstub.
+ required: false
+ default: "present"
+ choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = '''
+# Create 'stub0' etherstub
+- dladm_etherstub:
+ name: stub0
+ state: present
+
+# Remove 'stub0' etherstub
+- dladm_etherstub:
+ name: stub0
+ state: absent
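+
+# Create a temporary 'stub0' etherstub that does not persist across reboots
+# (a sketch using the 'temporary' option documented above)
+- dladm_etherstub:
+ name: stub0
+ temporary: true
+ state: present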
+'''
+
+RETURN = '''
+name:
+ description: etherstub name
+ returned: always
+ type: string
+ sample: "switch0"
+state:
+ description: state of the target
+ returned: always
+ type: string
+ sample: "present"
+temporary:
+ description: etherstub's persistence
+ returned: always
+ type: boolean
+ sample: "True"
+'''
+
+
+class Etherstub(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.temporary = module.params['temporary']
+ self.state = module.params['state']
+
+ def etherstub_exists(self):
+ cmd = [self.module.get_bin_path('dladm', True)]
+
+ cmd.append('show-etherstub')
+ cmd.append(self.name)
+
+ (rc, _, _) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def create_etherstub(self):
+ cmd = [self.module.get_bin_path('dladm', True)]
+
+ cmd.append('create-etherstub')
+
+ if self.temporary:
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def delete_etherstub(self):
+ cmd = [self.module.get_bin_path('dladm', True)]
+
+ cmd.append('delete-etherstub')
+
+ if self.temporary:
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ temporary=dict(default=False, type='bool'),
+ state=dict(default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True
+ )
+
+ etherstub = Etherstub(module)
+
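+    # rc stays None when no dladm command was run; the 'changed' flag below
+    # is derived from that.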
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = etherstub.name
+ result['state'] = etherstub.state
+ result['temporary'] = etherstub.temporary
+
+ if etherstub.state == 'absent':
+ if etherstub.etherstub_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = etherstub.delete_etherstub()
+ if rc != 0:
+ module.fail_json(name=etherstub.name, msg=err, rc=rc)
+ elif etherstub.state == 'present':
+ if not etherstub.etherstub_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = etherstub.create_etherstub()
+
+ if rc is not None and rc != 0:
+ module.fail_json(name=etherstub.name, msg=err, rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/illumos/dladm_vnic.py b/network/illumos/dladm_vnic.py
new file mode 100644
index 00000000000..0718517d475
--- /dev/null
+++ b/network/illumos/dladm_vnic.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: dladm_vnic
+short_description: Manage VNICs on Solaris/illumos systems.
+description:
+ - Create or delete VNICs on Solaris/illumos systems.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - VNIC name.
+ required: true
+ link:
+ description:
+ - VNIC underlying link name.
+ required: true
+ temporary:
+ description:
+ - Specifies that the VNIC is temporary. Temporary VNICs
+ do not persist across reboots.
+ required: false
+ default: false
+ choices: [ "true", "false" ]
+ mac:
+ description:
+ - Sets the VNIC's MAC address. Must be a valid unicast MAC address.
+ required: false
+ default: None
+ aliases: [ "macaddr" ]
+ vlan:
+ description:
+ - Enable VLAN tagging for this VNIC. The VLAN tag will have id
+ I(vlan).
+ required: false
+ default: None
+ aliases: [ "vlan_id" ]
+ state:
+ description:
+ - Create or delete Solaris/illumos VNIC.
+ required: false
+ default: "present"
+ choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = '''
+# Create 'vnic0' VNIC over 'bnx0' link
+- dladm_vnic:
+ name: vnic0
+ link: bnx0
+ state: present
+
+# Create VNIC with specified MAC and VLAN tag over 'aggr0'
+- dladm_vnic:
+ name: vnic1
+ link: aggr0
+ mac: '00:00:5E:00:53:23'
+ vlan: 4
+
+# Remove 'vnic0' VNIC
+- dladm_vnic:
+ name: vnic0
+ link: bnx0
+ state: absent
+'''
+
+RETURN = '''
+name:
+ description: VNIC name
+ returned: always
+ type: string
+ sample: "vnic0"
+link:
+ description: VNIC underlying link name
+ returned: always
+ type: string
+ sample: "igb0"
+state:
+ description: state of the target
+ returned: always
+ type: string
+ sample: "present"
+temporary:
+ description: VNIC's persistence
+ returned: always
+ type: boolean
+ sample: "True"
+mac:
+ description: MAC address to use for VNIC
+ returned: if mac is specified
+ type: string
+ sample: "00:00:5E:00:53:42"
+vlan:
+ description: VLAN to use for VNIC
+ returned: success
+ type: int
+ sample: 42
+'''
+
+import re
+
+
+class VNIC(object):
+
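+    # A unicast MAC address has the I/G bit (bit 0 of the first octet) clear,
+    # i.e. the second hex digit must be even.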
+    UNICAST_MAC_REGEX = r'^[a-f0-9][02468ace]:([a-f0-9]{2}:){4}[a-f0-9]{2}$'
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.link = module.params['link']
+ self.mac = module.params['mac']
+ self.vlan = module.params['vlan']
+ self.temporary = module.params['temporary']
+ self.state = module.params['state']
+
+ def vnic_exists(self):
+ cmd = [self.module.get_bin_path('dladm', True)]
+
+ cmd.append('show-vnic')
+ cmd.append(self.name)
+
+ (rc, _, _) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def create_vnic(self):
+ cmd = [self.module.get_bin_path('dladm', True)]
+
+ cmd.append('create-vnic')
+
+ if self.temporary:
+ cmd.append('-t')
+
+ if self.mac:
+ cmd.append('-m')
+ cmd.append(self.mac)
+
+ if self.vlan:
+ cmd.append('-v')
+ cmd.append(str(self.vlan))
+
+ cmd.append('-l')
+ cmd.append(self.link)
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def delete_vnic(self):
+ cmd = [self.module.get_bin_path('dladm', True)]
+
+ cmd.append('delete-vnic')
+
+ if self.temporary:
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+    def is_valid_unicast_mac(self):
+
+        # returns True when the MAC address is a well-formed unicast address
+        mac_re = re.match(self.UNICAST_MAC_REGEX, self.mac, re.IGNORECASE)
+
+        return mac_re is not None
+
+    def is_valid_vlan_id(self):
+
+        # VLAN IDs 0 and 4095 are reserved; usable tags are 1-4094
+        return 1 <= self.vlan <= 4094
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ link=dict(required=True),
+ mac=dict(default=None, aliases=['macaddr']),
+ vlan=dict(default=None, type='int', aliases=['vlan_id']),
+ temporary=dict(default=False, type='bool'),
+ state=dict(default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True
+ )
+
+ vnic = VNIC(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = vnic.name
+ result['link'] = vnic.link
+ result['state'] = vnic.state
+ result['temporary'] = vnic.temporary
+
+ if vnic.mac is not None:
+ if not vnic.is_valid_unicast_mac():
+ module.fail_json(msg='Invalid unicast MAC address',
+ mac=vnic.mac,
+ name=vnic.name,
+ state=vnic.state,
+ link=vnic.link,
+ vlan=vnic.vlan)
+ result['mac'] = vnic.mac
+
+ if vnic.vlan is not None:
+ if not vnic.is_valid_vlan_id():
+ module.fail_json(msg='Invalid VLAN tag',
+ mac=vnic.mac,
+ name=vnic.name,
+ state=vnic.state,
+ link=vnic.link,
+ vlan=vnic.vlan)
+ result['vlan'] = vnic.vlan
+
+ if vnic.state == 'absent':
+ if vnic.vnic_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = vnic.delete_vnic()
+ if rc != 0:
+ module.fail_json(name=vnic.name, msg=err, rc=rc)
+ elif vnic.state == 'present':
+ if not vnic.vnic_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = vnic.create_vnic()
+
+ if rc is not None and rc != 0:
+ module.fail_json(name=vnic.name, msg=err, rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/illumos/flowadm.py b/network/illumos/flowadm.py
new file mode 100644
index 00000000000..8b5807f7090
--- /dev/null
+++ b/network/illumos/flowadm.py
@@ -0,0 +1,523 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Adam Števko
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: flowadm
+short_description: Manage bandwidth resource control and priority for protocols, services and zones.
+description:
+ - Create/modify/remove networking bandwidth and associated resources for a type of traffic on a particular link.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description: >
+ - A flow is defined as a set of attributes based on Layer 3 and Layer 4
+ headers, which can be used to identify a protocol, service, or a zone.
+ required: true
+ aliases: [ 'flow' ]
+ link:
+ description:
+ - Specifies the link to configure the flow on.
+ required: false
+ local_ip:
+ description:
+ - Identifies a network flow by the local IP address.
+ required: false
+ remote_ip:
+ description:
+ - Identifies a network flow by the remote IP address.
+ required: false
+ transport:
+ description: >
+ - Specifies a Layer 4 protocol to be used. It is typically used in combination with I(local_port) to
+ identify the service that needs special attention.
+ required: false
+ local_port:
+ description:
+ - Identifies a service specified by the local port.
+ required: false
+ dsfield:
+ description: >
+ - Identifies the 8-bit differentiated services field (as defined in
+ RFC 2474). The optional dsfield_mask is used to state the bits of interest in
+ the differentiated services field when comparing with the dsfield
+ value. Both values must be in hexadecimal.
+ required: false
+ maxbw:
+ description: >
+ - Sets the full duplex bandwidth for the flow. The bandwidth is
+ specified as an integer with one of the scale suffixes (K, M, or G
+ for Kbps, Mbps, and Gbps). If no units are specified, the input
+ value will be read as Mbps.
+ required: false
+ priority:
+ description:
+ - Sets the relative priority for the flow.
+ required: false
+ default: 'medium'
+ choices: [ 'low', 'medium', 'high' ]
+ temporary:
+ description:
+ - Specifies that the configured flow is temporary. Temporary
+ flows do not persist across reboots.
+ required: false
+ default: false
+ choices: [ "true", "false" ]
+ state:
+ description:
+ - Create, delete or reset the flow.
+ required: false
+ default: present
+ choices: [ 'absent', 'present', 'resetted' ]
+'''
+
+EXAMPLES = '''
+# Limit SSH traffic to 100M via vnic0 interface
+- flowadm:
+ link: vnic0
+ flow: ssh_out
+ transport: tcp
+ local_port: 22
+ maxbw: 100M
+ state: present
+
+# Reset flow properties
+- flowadm:
+ name: dns
+ state: resetted
+
+# Configure policy for EF PHB (DSCP value of 101110 from RFC 2598) with a bandwidth of 500 Mbps and a high priority.
+- flowadm:
+ link: bge0
+ dsfield: '0x2e:0xfc'
+ maxbw: 500M
+ priority: high
+ flow: efphb-flow
+ state: present
+'''
+
+RETURN = '''
+name:
+ description: flow name
+ returned: always
+ type: string
+ sample: "http_drop"
+link:
+ description: flow's link
+ returned: if link is defined
+ type: string
+ sample: "vnic0"
+state:
+ description: state of the target
+ returned: always
+ type: string
+ sample: "present"
+temporary:
+ description: flow's persistence
+ returned: always
+ type: boolean
+ sample: "True"
+priority:
+ description: flow's priority
+ returned: if priority is defined
+ type: string
+ sample: "low"
+transport:
+ description: flow's transport
+ returned: if transport is defined
+ type: string
+ sample: "tcp"
+maxbw:
+ description: flow's maximum bandwidth
+ returned: if maxbw is defined
+ type: string
+ sample: "100M"
+local_ip:
+ description: flow's local IP address
+ returned: if local_ip is defined
+ type: string
+ sample: "10.0.0.42"
+local_port:
+ description: flow's local port
+ returned: if local_port is defined
+ type: int
+ sample: 1337
+remote_ip:
+ description: flow's remote IP address
+ returned: if remote_ip is defined
+ type: string
+ sample: "10.0.0.42"
+dsfield:
+ description: flow's differentiated services value
+ returned: if dsfield is defined
+ type: string
+ sample: "0x2e:0xfc"
+'''
+
+
+import socket
+
+SUPPORTED_TRANSPORTS = ['tcp', 'udp', 'sctp', 'icmp', 'icmpv6']
+SUPPORTED_PRIORITIES = ['low', 'medium', 'high']
+
+SUPPORTED_ATTRIBUTES = ['local_ip', 'remote_ip', 'transport', 'local_port', 'dsfield']
+SUPPORTED_PROPERTIES = ['maxbw', 'priority']
+
+
+class Flow(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.link = module.params['link']
+ self.local_ip = module.params['local_ip']
+ self.remote_ip = module.params['remote_ip']
+ self.transport = module.params['transport']
+ self.local_port = module.params['local_port']
+ self.dsfield = module.params['dsfield']
+ self.maxbw = module.params['maxbw']
+ self.priority = module.params['priority']
+ self.temporary = module.params['temporary']
+ self.state = module.params['state']
+
+ self._needs_updating = {
+ 'maxbw': False,
+ 'priority': False,
+ }
+
+    @classmethod
+    def is_valid_port(cls, port):
+        try:
+            return 1 <= int(port) <= 65535
+        except ValueError:
+            return False
+
+    @classmethod
+    def is_valid_address(cls, ip):
+
+        netmask = None
+
+        if ip.count('/') == 1:
+            ip_address, netmask = ip.split('/')
+        else:
+            ip_address = ip
+
+        if netmask is not None:
+            try:
+                netmask = int(netmask)
+            except ValueError:
+                return False
+
+        if len(ip_address.split('.')) == 4:
+            try:
+                socket.inet_pton(socket.AF_INET, ip_address)
+            except socket.error:
+                return False
+
+            if netmask is not None and not 0 <= netmask <= 32:
+                return False
+        else:
+            try:
+                socket.inet_pton(socket.AF_INET6, ip_address)
+            except socket.error:
+                return False
+
+            if netmask is not None and not 0 <= netmask <= 128:
+                return False
+
+        return True
+
+ @classmethod
+ def is_hex(cls, number):
+ try:
+ int(number, 16)
+ except ValueError:
+ return False
+
+ return True
+
+    @classmethod
+    def is_valid_dsfield(cls, dsfield):
+
+        dsmask = None
+
+        if dsfield.count(':') == 1:
+            dsval, dsmask = dsfield.split(':')
+        else:
+            dsval = dsfield
+
+        if dsmask is not None:
+            if not cls.is_hex(dsmask) or not 0x01 <= int(dsmask, 16) <= 0xff:
+                return False
+
+        if not cls.is_hex(dsval) or not 0x01 <= int(dsval, 16) <= 0xff:
+            return False
+
+        return True
+
+ def flow_exists(self):
+ cmd = [self.module.get_bin_path('flowadm')]
+
+ cmd.append('show-flow')
+ cmd.append(self.name)
+
+ (rc, _, _) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def delete_flow(self):
+ cmd = [self.module.get_bin_path('flowadm')]
+
+ cmd.append('remove-flow')
+ if self.temporary:
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def create_flow(self):
+ cmd = [self.module.get_bin_path('flowadm')]
+
+ cmd.append('add-flow')
+ cmd.append('-l')
+ cmd.append(self.link)
+
+ if self.local_ip:
+ cmd.append('-a')
+ cmd.append('local_ip=' + self.local_ip)
+
+ if self.remote_ip:
+ cmd.append('-a')
+ cmd.append('remote_ip=' + self.remote_ip)
+
+ if self.transport:
+ cmd.append('-a')
+ cmd.append('transport=' + self.transport)
+
+ if self.local_port:
+ cmd.append('-a')
+ cmd.append('local_port=' + self.local_port)
+
+ if self.dsfield:
+ cmd.append('-a')
+ cmd.append('dsfield=' + self.dsfield)
+
+ if self.maxbw:
+ cmd.append('-p')
+ cmd.append('maxbw=' + self.maxbw)
+
+ if self.priority:
+ cmd.append('-p')
+ cmd.append('priority=' + self.priority)
+
+ if self.temporary:
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def _query_flow_props(self):
+ cmd = [self.module.get_bin_path('flowadm')]
+
+ cmd.append('show-flowprop')
+ cmd.append('-c')
+ cmd.append('-o')
+ # parseable output: one "property:value" pair per line (the POSSIBLE
+ # column would list permitted values, not the configured ones)
+ cmd.append('property,value')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+    def flow_needs_updating(self):
+        (rc, out, err) = self._query_flow_props()
+
+        needs_updating = False
+
+        if rc == 0:
+            properties = (line.split(':') for line in out.rstrip().split('\n'))
+            for prop, value in properties:
+                if prop == 'maxbw' and self.maxbw != value:
+                    self._needs_updating.update({prop: True})
+                    needs_updating = True
+
+                elif prop == 'priority' and self.priority != value:
+                    self._needs_updating.update({prop: True})
+                    needs_updating = True
+
+            return needs_updating
+        else:
+            self.module.fail_json(msg='Error while checking flow properties: %s' % err,
+                                  stderr=err,
+                                  rc=rc)
+
+ def update_flow(self):
+ cmd = [self.module.get_bin_path('flowadm')]
+
+ cmd.append('set-flowprop')
+
+ if self.maxbw and self._needs_updating['maxbw']:
+ cmd.append('-p')
+ cmd.append('maxbw=' + self.maxbw)
+
+ if self.priority and self._needs_updating['priority']:
+ cmd.append('-p')
+ cmd.append('priority=' + self.priority)
+
+ if self.temporary:
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
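+
+    # Minimal sketch of the reset handler used by main() for state=resetted,
+    # assuming the standard `flowadm reset-flowprop` subcommand, which resets
+    # flow properties to their default values.
+    def reset_flow(self):
+        cmd = [self.module.get_bin_path('flowadm')]
+
+        cmd.append('reset-flowprop')
+
+        if self.temporary:
+            cmd.append('-t')
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)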
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['flow']),
+ link=dict(required=False),
+ local_ip=dict(required=False),
+ remote_ip=dict(required=False),
+ transport=dict(required=False, choices=SUPPORTED_TRANSPORTS),
+ local_port=dict(required=False),
+ dsfield=dict(required=False),
+ maxbw=dict(required=False),
+ priority=dict(required=False,
+ default='medium',
+ choices=SUPPORTED_PRIORITIES),
+ temporary=dict(default=False, type='bool'),
+ state=dict(required=False,
+ default='present',
+ choices=['absent', 'present', 'resetted']),
+ ),
+ mutually_exclusive=[
+ ('local_ip', 'remote_ip'),
+ ('local_ip', 'transport'),
+ ('local_ip', 'local_port'),
+ ('local_ip', 'dsfield'),
+ ('remote_ip', 'transport'),
+ ('remote_ip', 'local_port'),
+ ('remote_ip', 'dsfield'),
+ ('transport', 'dsfield'),
+ ('local_port', 'dsfield'),
+ ],
+ supports_check_mode=True
+ )
+
+ flow = Flow(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = flow.name
+ result['state'] = flow.state
+ result['temporary'] = flow.temporary
+
+ if flow.link:
+ result['link'] = flow.link
+
+ if flow.maxbw:
+ result['maxbw'] = flow.maxbw
+
+ if flow.priority:
+ result['priority'] = flow.priority
+
+    if flow.local_ip:
+        if flow.is_valid_address(flow.local_ip):
+            result['local_ip'] = flow.local_ip
+        else:
+            module.fail_json(msg='Invalid IP address: %s' % flow.local_ip,
+                             rc=1)
+
+    if flow.remote_ip:
+        if flow.is_valid_address(flow.remote_ip):
+            result['remote_ip'] = flow.remote_ip
+        else:
+            module.fail_json(msg='Invalid IP address: %s' % flow.remote_ip,
+                             rc=1)
+
+ if flow.transport:
+ result['transport'] = flow.transport
+
+ if flow.local_port:
+ if flow.is_valid_port(flow.local_port):
+ result['local_port'] = flow.local_port
+ else:
+ module.fail_json(msg='Invalid port: %s' % flow.local_port,
+ rc=1)
+
+ if flow.dsfield:
+ if flow.is_valid_dsfield(flow.dsfield):
+ result['dsfield'] = flow.dsfield
+ else:
+ module.fail_json(msg='Invalid dsfield: %s' % flow.dsfield,
+ rc=1)
+
+ if flow.state == 'absent':
+ if flow.flow_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = flow.delete_flow()
+ if rc != 0:
+ module.fail_json(msg='Error while deleting flow: "%s"' % err,
+ name=flow.name,
+ stderr=err,
+ rc=rc)
+
+ elif flow.state == 'present':
+ if not flow.flow_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = flow.create_flow()
+ if rc != 0:
+ module.fail_json(msg='Error while creating flow: "%s"' % err,
+ name=flow.name,
+ stderr=err,
+ rc=rc)
+ else:
+ if flow.flow_needs_updating():
+ (rc, out, err) = flow.update_flow()
+ if rc != 0:
+ module.fail_json(msg='Error while updating flow: "%s"' % err,
+ name=flow.name,
+ stderr=err,
+ rc=rc)
+
+ elif flow.state == 'resetted':
+ if flow.flow_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = flow.reset_flow()
+ if rc != 0:
+ module.fail_json(msg='Error while resetting flow: "%s"' % err,
+ name=flow.name,
+ stderr=err,
+ rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/illumos/ipadm_if.py b/network/illumos/ipadm_if.py
new file mode 100644
index 00000000000..d3d0c0af0bd
--- /dev/null
+++ b/network/illumos/ipadm_if.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipadm_if
+short_description: Manage IP interfaces on Solaris/illumos systems.
+description:
+ - Create, delete, enable or disable IP interfaces on Solaris/illumos
+ systems.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - IP interface name.
+ required: true
+ temporary:
+ description:
+ - Specifies that the IP interface is temporary. Temporary IP
+ interfaces do not persist across reboots.
+ required: false
+ default: false
+ choices: [ "true", "false" ]
+ state:
+ description:
+ - Create, delete, enable or disable Solaris/illumos IP interfaces.
+ required: false
+ default: "present"
+ choices: [ "present", "absent", "enabled", "disabled" ]
+'''
+
+EXAMPLES = '''
+# Create vnic0 interface
+- ipadm_if:
+ name: vnic0
+ state: enabled
+
+# Disable vnic0 interface
+- ipadm_if:
+ name: vnic0
+ state: disabled
+'''
+
+RETURN = '''
+name:
+ description: IP interface name
+ returned: always
+ type: string
+ sample: "vnic0"
+state:
+ description: state of the target
+ returned: always
+ type: string
+ sample: "present"
+temporary:
+ description: persistence of an IP interface
+ returned: always
+ type: boolean
+ sample: "True"
+'''
+
+
+class IPInterface(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.temporary = module.params['temporary']
+ self.state = module.params['state']
+
+ def interface_exists(self):
+ cmd = [self.module.get_bin_path('ipadm', True)]
+
+ cmd.append('show-if')
+ cmd.append(self.name)
+
+ (rc, _, _) = self.module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def interface_is_disabled(self):
+ cmd = [self.module.get_bin_path('ipadm', True)]
+
+ cmd.append('show-if')
+ cmd.append('-o')
+ cmd.append('state')
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(name=self.name, rc=rc, msg=err)
+
+ return 'disabled' in out
+
+ def create_interface(self):
+ cmd = [self.module.get_bin_path('ipadm', True)]
+
+ cmd.append('create-if')
+
+ if self.temporary:
+ cmd.append('-t')
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def delete_interface(self):
+ cmd = [self.module.get_bin_path('ipadm', True)]
+
+ cmd.append('delete-if')
+
+ if self.temporary:
+ cmd.append('-t')
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
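+    # ipadm enable-if/disable-if require '-t': persistent enabling/disabling
+    # is not supported by illumos ipadm, so the flag is passed unconditionally.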
+ def enable_interface(self):
+ cmd = [self.module.get_bin_path('ipadm', True)]
+
+ cmd.append('enable-if')
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def disable_interface(self):
+ cmd = [self.module.get_bin_path('ipadm', True)]
+
+ cmd.append('disable-if')
+ cmd.append('-t')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ temporary=dict(default=False, type='bool'),
+ state=dict(default='present', choices=['absent',
+ 'present',
+ 'enabled',
+ 'disabled']),
+ ),
+ supports_check_mode=True
+ )
+
+ interface = IPInterface(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = interface.name
+ result['state'] = interface.state
+ result['temporary'] = interface.temporary
+
+ if interface.state == 'absent':
+ if interface.interface_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = interface.delete_interface()
+ if rc != 0:
+ module.fail_json(name=interface.name, msg=err, rc=rc)
+ elif interface.state == 'present':
+ if not interface.interface_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = interface.create_interface()
+
+ if rc is not None and rc != 0:
+ module.fail_json(name=interface.name, msg=err, rc=rc)
+
+ elif interface.state == 'enabled':
+ if interface.interface_is_disabled():
+ (rc, out, err) = interface.enable_interface()
+
+ if rc is not None and rc != 0:
+ module.fail_json(name=interface.name, msg=err, rc=rc)
+
+ elif interface.state == 'disabled':
+ if not interface.interface_is_disabled():
+ (rc, out, err) = interface.disable_interface()
+
+ if rc is not None and rc != 0:
+ module.fail_json(name=interface.name, msg=err, rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/illumos/ipadm_prop.py b/network/illumos/ipadm_prop.py
new file mode 100644
index 00000000000..509ff82b1f7
--- /dev/null
+++ b/network/illumos/ipadm_prop.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipadm_prop
+short_description: Manage protocol properties on Solaris/illumos systems.
+description:
+ - Modify protocol properties on Solaris/illumos systems.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+ protocol:
+ description:
+ - Specifies the protocol for which we want to manage properties.
+ required: true
+ property:
+ description:
+ - Specifies the name of property we want to manage.
+ required: true
+ value:
+ description:
+ - Specifies the value we want to set for the property.
+ required: false
+ temporary:
+ description:
+ - Specifies that the property value is temporary. Temporary
+ property values do not persist across reboots.
+ required: false
+ default: false
+ choices: [ "true", "false" ]
+ state:
+ description:
+ - Set or reset the property value.
+ required: false
+ default: present
+ choices: [ "present", "absent", "reset" ]
+'''
+
+EXAMPLES = '''
+# Set TCP receive buffer size
+- ipadm_prop:
+    protocol: tcp
+    property: recv_buf
+    value: 65536
+
+# Reset UDP send buffer size to the default value
+- ipadm_prop:
+    protocol: udp
+    property: send_buf
+    state: reset
+'''
+
+RETURN = '''
+protocol:
+ description: property's protocol
+ returned: always
+ type: string
+ sample: "TCP"
+property:
+ description: name of the property
+ returned: always
+ type: string
+ sample: "recv_maxbuf"
+state:
+ description: state of the target
+ returned: always
+ type: string
+ sample: "present"
+temporary:
+ description: property's persistence
+ returned: always
+ type: boolean
+ sample: "True"
+value:
+ description: value of the property
+ returned: always
+ type: int/string (depends on property)
+ sample: 1024/never
+'''
+
+SUPPORTED_PROTOCOLS = ['ipv4', 'ipv6', 'icmp', 'tcp', 'udp', 'sctp']
+
+
+class Prop(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.protocol = module.params['protocol']
+ self.property = module.params['property']
+ self.value = module.params['value']
+ self.temporary = module.params['temporary']
+ self.state = module.params['state']
+
+ def property_exists(self):
+ cmd = [self.module.get_bin_path('ipadm')]
+
+ cmd.append('show-prop')
+ cmd.append('-p')
+ cmd.append(self.property)
+ cmd.append(self.protocol)
+
+ (rc, _, _) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ self.module.fail_json(msg='Unknown property "%s" for protocol %s' %
+ (self.property, self.protocol),
+ protocol=self.protocol,
+ property=self.property)
+
+    def property_is_default(self):
+        # True when the current value equals the default, i.e. the property
+        # has not been modified.
+        cmd = [self.module.get_bin_path('ipadm')]
+
+        cmd.append('show-prop')
+        cmd.append('-c')
+        cmd.append('-o')
+        cmd.append('current,default')
+        cmd.append('-p')
+        cmd.append(self.property)
+        cmd.append(self.protocol)
+
+        (rc, out, _) = self.module.run_command(cmd)
+
+        # parseable output is a single "current:default" line
+        out = out.rstrip()
+        (value, default) = out.split(':')
+
+        return rc == 0 and value == default
+
+ def property_is_set(self):
+ cmd = [self.module.get_bin_path('ipadm')]
+
+ cmd.append('show-prop')
+ cmd.append('-c')
+ cmd.append('-o')
+ cmd.append('current')
+ cmd.append('-p')
+ cmd.append(self.property)
+ cmd.append(self.protocol)
+
+ (rc, out, _) = self.module.run_command(cmd)
+
+ out = out.rstrip()
+
+ if rc == 0 and self.value == out:
+ return True
+ else:
+ return False
+
+ def set_property(self):
+ cmd = [self.module.get_bin_path('ipadm')]
+
+ cmd.append('set-prop')
+
+ if self.temporary:
+ cmd.append('-t')
+
+ cmd.append('-p')
+ cmd.append(self.property + "=" + self.value)
+ cmd.append(self.protocol)
+
+ return self.module.run_command(cmd)
+
+ def reset_property(self):
+ cmd = [self.module.get_bin_path('ipadm')]
+
+ cmd.append('reset-prop')
+
+ if self.temporary:
+ cmd.append('-t')
+
+ cmd.append('-p')
+ cmd.append(self.property)
+ cmd.append(self.protocol)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ protocol=dict(required=True, choices=SUPPORTED_PROTOCOLS),
+ property=dict(required=True),
+ value=dict(required=False),
+ temporary=dict(default=False, type='bool'),
+ state=dict(
+ default='present', choices=['absent', 'present', 'reset']),
+ ),
+ supports_check_mode=True
+ )
+
+ prop = Prop(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['protocol'] = prop.protocol
+ result['property'] = prop.property
+ result['state'] = prop.state
+ result['temporary'] = prop.temporary
+ if prop.value:
+ result['value'] = prop.value
+
+ if prop.state == 'absent' or prop.state == 'reset':
+ if prop.property_exists():
+ if not prop.property_is_default():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = prop.reset_property()
+ if rc != 0:
+ module.fail_json(protocol=prop.protocol,
+ property=prop.property,
+ msg=err,
+ rc=rc)
+
+ elif prop.state == 'present':
+ if prop.value is None:
+ module.fail_json(msg='Value is mandatory with state "present"')
+
+ if prop.property_exists():
+ if not prop.property_is_set():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = prop.set_property()
+ if rc != 0:
+ module.fail_json(protocol=prop.protocol,
+ property=prop.property,
+ msg=err,
+ rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/ipify_facts.py b/network/ipify_facts.py
new file mode 100644
index 00000000000..4ffe19d3f5c
--- /dev/null
+++ b/network/ipify_facts.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipify_facts
+short_description: Retrieve the public IP of your internet gateway.
+description:
+ - Useful if you are behind NAT and need to know the public IP of your internet gateway.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ api_url:
+ description:
+ - URL of the ipify.org API service.
+ - C(?format=json) will be appended by default.
+ required: false
+ default: 'https://api.ipify.org'
+ timeout:
+ description:
+ - HTTP connection timeout in seconds.
+ required: false
+ default: 10
+ version_added: "2.3"
+notes:
+ - "Visit https://www.ipify.org to get more information."
+'''
+
+EXAMPLES = '''
+# Gather IP facts from ipify.org
+- name: get my public IP
+ ipify_facts:
+
+# Gather IP facts from your own ipify service endpoint with a custom timeout
+- name: get my public IP
+ ipify_facts:
+ api_url: http://api.example.com/ipify
+ timeout: 20
+'''
+
+RETURN = '''
+---
+ipify_public_ip:
+ description: Public IP of the internet gateway.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+'''
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+class IpifyFacts(object):
+
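+    # NOTE: this class reads the module-level global `module` that main()
+    # installs via `global module`; fetch_url() and fail_json() both rely on it.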
+ def __init__(self):
+ self.api_url = module.params.get('api_url')
+ self.timeout = module.params.get('timeout')
+
+    def run(self):
+        result = {
+            'ipify_public_ip': None
+        }
+        (response, info) = fetch_url(module=module, url=self.api_url + "?format=json", force=True, timeout=self.timeout)
+
+        if not response:
+            module.fail_json(msg="No valid response from %s within %s seconds (timeout)" % (self.api_url, self.timeout))
+
+        data = json.loads(response.read())
+        result['ipify_public_ip'] = data.get('ip')
+        return result
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec = dict(
+ api_url=dict(default='https://api.ipify.org'),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ ipify_facts = IpifyFacts().run()
+ ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts)
+ module.exit_json(**ipify_facts_result)
+
+if __name__ == '__main__':
+ main()
diff --git a/network/ipinfoio_facts.py b/network/ipinfoio_facts.py
new file mode 100644
index 00000000000..748c49dcc9a
--- /dev/null
+++ b/network/ipinfoio_facts.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+#
+# (c) 2016, Aleksei Kostiuk
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipinfoio_facts
+short_description: "Retrieve IP geolocation facts of a host's IP address"
+description:
+ - "Gather IP geolocation facts of a host's IP address using ipinfo.io API"
+version_added: "2.3"
+author: "Aleksei Kostiuk (@akostyuk)"
+options:
+ timeout:
+ description:
+ - HTTP connection timeout in seconds
+ required: false
+ default: 10
+ http_agent:
+ description:
+ - Set http user agent
+ required: false
+ default: "ansible-ipinfoio-module/0.0.1"
+notes:
+ - "Check http://ipinfo.io/ for more information"
+'''
+
+EXAMPLES = '''
+# Retrieve geolocation data of a host's IP address
+- name: get IP geolocation data
+ ipinfoio_facts:
+'''
+
+RETURN = '''
+ansible_facts:
+ description: "Dictionary of ip geolocation facts for a host's IP address"
+ returned: changed
+ type: dictionary
+ contains:
+ ip:
+ description: "Public IP address of a host"
+ type: string
+ sample: "8.8.8.8"
+ hostname:
+ description: Domain name
+ type: string
+ sample: "google-public-dns-a.google.com"
+ country:
+ description: ISO 3166-1 alpha-2 country code
+ type: string
+ sample: "US"
+ region:
+ description: State or province name
+ type: string
+ sample: "California"
+ city:
+ description: City name
+ type: string
+ sample: "Mountain View"
+ loc:
+ description: Latitude and Longitude of the location
+ type: string
+ sample: "37.3860,-122.0838"
+ org:
+ description: "organization's name"
+ type: string
+ sample: "AS3356 Level 3 Communications, Inc."
+ postal:
+ description: Postal code
+ type: string
+ sample: "94035"
+'''
+
+USER_AGENT = 'ansible-ipinfoio-module/0.0.1'
+
+
+class IpinfoioFacts(object):
+
+ def __init__(self, module):
+ self.url = 'https://ipinfo.io/json'
+ self.timeout = module.params.get('timeout')
+ self.module = module
+
+    def get_geo_data(self):
+        response, info = fetch_url(self.module, self.url, force=True,  # NOQA
+                                   timeout=self.timeout)
+        try:
+            # the bare comparison did nothing; assert makes the status check
+            # actually raise on a non-200 response
+            assert info['status'] == 200
+        except AssertionError:
+            self.module.fail_json(msg='Could not get {0} page, '
+                                  'check for connectivity!'.format(self.url))
+        else:
+            try:
+                content = response.read()
+                result = self.module.from_json(content.decode('utf8'))
+            except ValueError:
+                self.module.fail_json(
+                    msg='Failed to parse the ipinfo.io response: '
+                    '{0} {1}'.format(self.url, content))
+            else:
+                return result
+
+
+def main():
+ module = AnsibleModule( # NOQA
+ argument_spec=dict(
+ http_agent=dict(default=USER_AGENT),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ ipinfoio = IpinfoioFacts(module)
+ ipinfoio_result = dict(
+ changed=False, ansible_facts=ipinfoio.get_geo_data())
+ module.exit_json(**ipinfoio_result)
+
+from ansible.module_utils.basic import * # NOQA
+from ansible.module_utils.urls import * # NOQA
+
+if __name__ == '__main__':
+ main()
diff --git a/network/lldp.py b/network/lldp.py
index fd1b1092d5e..f222d765fe9 100644
--- a/network/lldp.py
+++ b/network/lldp.py
@@ -16,6 +16,10 @@
import subprocess
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: lldp
@@ -36,8 +40,9 @@
lldp:
- name: Print each switch/port
- debug: msg="{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }}
- with_items: lldp.keys()
+ debug:
+ msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }}"
+ with_items: "{{ lldp.keys() }}"
# TASK: [Print each switch/port] ***********************************************************
# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
@@ -82,5 +87,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/network/netconf/__init__.py b/network/netconf/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/network/netconf/netconf_config.py b/network/netconf/netconf_config.py
new file mode 100755
index 00000000000..7ed79a908b5
--- /dev/null
+++ b/network/netconf/netconf_config.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# (c) 2016, Leandro Lisboa Penz
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: netconf_config
+author: "Leandro Lisboa Penz (@lpenz)"
+short_description: netconf device configuration
+description:
+ - Netconf is a network management protocol developed and standardized by
+ the IETF. It is documented in RFC 6241.
+
+ - This module allows the user to send a configuration XML file to a netconf
+ device, and detects if there was a configuration change.
+notes:
+ - This module supports devices with and without the candidate and
+ confirmed-commit capabilities. It always uses the safest feature the
+ device provides.
+version_added: "2.2"
+options:
+ host:
+ description:
+ - the hostname or ip address of the netconf device
+ required: true
+ port:
+ description:
+ - the netconf port
+ default: 830
+ required: false
+ hostkey_verify:
+ description:
+ - if true, the ssh host key of the device must match a ssh key present on the host
+ - if false, the ssh host key of the device is not checked
+ default: true
+ required: false
+ username:
+ description:
+ - the username to authenticate with
+ required: true
+ password:
+ description:
+ - password of the user to authenticate with
+ required: true
+ xml:
+ description:
+ - the XML content to send to the device
+ required: true
+
+
+requirements:
+ - "python >= 2.6"
+ - "ncclient"
+'''
+
+EXAMPLES = '''
+- name: set ntp server in the device
+ netconf_config:
+ host: 10.0.0.1
+ username: admin
+ password: admin
+ xml: |
+        <config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
+            <system xmlns="urn:ietf:params:xml:ns:yang:ietf-system">
+                <ntp>
+                    <enabled>true</enabled>
+                    <server>
+                        <name>ntp1</name>
+                        <udp><address>127.0.0.1</address></udp>
+                    </server>
+                </ntp>
+            </system>
+        </config>
+
+- name: wipe ntp configuration
+ netconf_config:
+ host: 10.0.0.1
+ username: admin
+ password: admin
+ xml: |
+        <config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
+            <system xmlns="urn:ietf:params:xml:ns:yang:ietf-system">
+                <ntp>
+                    <enabled>false</enabled>
+                    <server operation="remove">
+                        <name>ntp1</name>
+                    </server>
+                </ntp>
+            </system>
+        </config>
+
+'''
+
+RETURN = '''
+server_capabilities:
+ description: list of capabilities of the server
+ returned: success
+ type: list of strings
+ sample: ['urn:ietf:params:netconf:base:1.1','urn:ietf:params:netconf:capability:confirmed-commit:1.0','urn:ietf:params:netconf:capability:candidate:1.0']
+
+'''
+
+import xml.dom.minidom
+try:
+ import ncclient.manager
+ HAS_NCCLIENT = True
+except ImportError:
+ HAS_NCCLIENT = False
+
+
+import logging
+
+
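+# netconf_edit_config locks the target datastore (candidate if the device
+# advertises the capability, otherwise running), compares get-config output
+# before and after edit-config to detect a change, and prefers a confirmed
+# commit so that a lost session rolls the change back automatically.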
+def netconf_edit_config(m, xml, commit, retkwargs):
+ if ":candidate" in m.server_capabilities:
+ datastore = 'candidate'
+ else:
+ datastore = 'running'
+ m.lock(target=datastore)
+ try:
+ m.discard_changes()
+ config_before = m.get_config(source=datastore)
+ m.edit_config(target=datastore, config=xml)
+ config_after = m.get_config(source=datastore)
+ changed = config_before.data_xml != config_after.data_xml
+ if changed and commit:
+ if ":confirmed-commit" in m.server_capabilities:
+ m.commit(confirmed=True)
+ m.commit()
+ else:
+ m.commit()
+ return changed
+ finally:
+ m.unlock(target=datastore)
+
+
+# ------------------------------------------------------------------- #
+# Main
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ port=dict(type='int', default=830),
+ hostkey_verify=dict(type='bool', default=True),
+ username=dict(type='str', required=True, no_log=True),
+ password=dict(type='str', required=True, no_log=True),
+ xml=dict(type='str', required=True),
+ )
+ )
+
+ if not HAS_NCCLIENT:
+ module.fail_json(msg='could not import the python library '
+ 'ncclient required by this module')
+
+ try:
+ xml.dom.minidom.parseString(module.params['xml'])
+ except:
+ e = get_exception()
+ module.fail_json(
+ msg='error parsing XML: ' +
+ str(e)
+ )
+ return
+
+ nckwargs = dict(
+ host=module.params['host'],
+ port=module.params['port'],
+ hostkey_verify=module.params['hostkey_verify'],
+ username=module.params['username'],
+ password=module.params['password'],
+ )
+ retkwargs = dict()
+
+ try:
+ m = ncclient.manager.connect(**nckwargs)
+ except ncclient.transport.errors.AuthenticationError:
+ module.fail_json(
+ msg='authentication failed while connecting to device'
+ )
+ except:
+ e = get_exception()
+ module.fail_json(
+ msg='error connecting to the device: ' +
+ str(e)
+ )
+ return
+ retkwargs['server_capabilities'] = list(m.server_capabilities)
+ try:
+ changed = netconf_edit_config(
+ m=m,
+ xml=module.params['xml'],
+ commit=True,
+ retkwargs=retkwargs,
+ )
+ finally:
+ m.close_session()
+ module.exit_json(changed=changed, **retkwargs)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nmcli.py b/network/nmcli.py
index ccefef18ccf..86a844c7ee0 100644
--- a/network/nmcli.py
+++ b/network/nmcli.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION='''
---
module: nmcli
@@ -73,16 +77,16 @@
required: False
default: None
description:
- - 'The IPv4 address to this interface using this format ie: "192.168.1.24/24"'
+ - 'The IPv4 address to this interface using this format ie: "192.0.2.24/24"'
gw4:
required: False
description:
- - 'The IPv4 gateway for this interface using this format ie: "192.168.100.1"'
+ - 'The IPv4 gateway for this interface using this format ie: "192.0.2.1"'
dns4:
required: False
default: None
description:
- - 'A list of upto 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ["8.8.8.8 8.8.4.4"]'
+ - 'A list of upto 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ["192.0.2.53", "198.51.100.53"]'
ip6:
required: False
default: None
@@ -228,46 +232,89 @@
```yml
---
#devops_os_define_network
-storage_gw: "192.168.0.254"
-external_gw: "10.10.0.254"
-tenant_gw: "172.100.0.254"
+storage_gw: "192.0.2.254"
+external_gw: "198.51.100.254"
+tenant_gw: "203.0.113.254"
#Team vars
nmcli_team:
- - {conn_name: 'tenant', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"}
- - {conn_name: 'external', ip4: "{{external_ip}}", gw4: "{{external_gw}}"}
- - {conn_name: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"}
+ - conn_name: tenant
+ ip4: '{{ tenant_ip }}'
+ gw4: '{{ tenant_gw }}'
+ - conn_name: external
+ ip4: '{{ external_ip }}'
+ gw4: '{{ external_gw }}'
+ - conn_name: storage
+ ip4: '{{ storage_ip }}'
+ gw4: '{{ storage_gw }}'
nmcli_team_slave:
- - {conn_name: 'em1', ifname: 'em1', master: 'tenant'}
- - {conn_name: 'em2', ifname: 'em2', master: 'tenant'}
- - {conn_name: 'p2p1', ifname: 'p2p1', master: 'storage'}
- - {conn_name: 'p2p2', ifname: 'p2p2', master: 'external'}
+ - conn_name: em1
+ ifname: em1
+ master: tenant
+ - conn_name: em2
+ ifname: em2
+ master: tenant
+ - conn_name: p2p1
+ ifname: p2p1
+ master: storage
+ - conn_name: p2p2
+ ifname: p2p2
+ master: external
#bond vars
nmcli_bond:
- - {conn_name: 'tenant', ip4: "{{tenant_ip}}", gw4: '', mode: 'balance-rr'}
- - {conn_name: 'external', ip4: "{{external_ip}}", gw4: '', mode: 'balance-rr'}
- - {conn_name: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}", mode: 'balance-rr'}
+ - conn_name: tenant
+ ip4: '{{ tenant_ip }}'
+ gw4: ''
+ mode: balance-rr
+ - conn_name: external
+ ip4: '{{ external_ip }}'
+ gw4: ''
+ mode: balance-rr
+ - conn_name: storage
+ ip4: '{{ storage_ip }}'
+ gw4: '{{ storage_gw }}'
+ mode: balance-rr
nmcli_bond_slave:
- - {conn_name: 'em1', ifname: 'em1', master: 'tenant'}
- - {conn_name: 'em2', ifname: 'em2', master: 'tenant'}
- - {conn_name: 'p2p1', ifname: 'p2p1', master: 'storage'}
- - {conn_name: 'p2p2', ifname: 'p2p2', master: 'external'}
+ - conn_name: em1
+ ifname: em1
+ master: tenant
+ - conn_name: em2
+ ifname: em2
+ master: tenant
+ - conn_name: p2p1
+ ifname: p2p1
+ master: storage
+ - conn_name: p2p2
+ ifname: p2p2
+ master: external
#ethernet vars
nmcli_ethernet:
- - {conn_name: 'em1', ifname: 'em1', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"}
- - {conn_name: 'em2', ifname: 'em2', ip4: "{{tenant_ip1}}", gw4: "{{tenant_gw}}"}
- - {conn_name: 'p2p1', ifname: 'p2p1', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"}
- - {conn_name: 'p2p2', ifname: 'p2p2', ip4: "{{external_ip}}", gw4: "{{external_gw}}"}
+ - conn_name: em1
+ ifname: em1
+ ip4: '{{ tenant_ip }}'
+ gw4: '{{ tenant_gw }}'
+ - conn_name: em2
+ ifname: em2
+ ip4: '{{ tenant_ip1 }}'
+ gw4: '{{ tenant_gw }}'
+ - conn_name: p2p1
+ ifname: p2p1
+ ip4: '{{ storage_ip }}'
+ gw4: '{{ storage_gw }}'
+ - conn_name: p2p2
+ ifname: p2p2
+ ip4: '{{ external_ip }}'
+ gw4: '{{ external_gw }}'
```
### host_vars
```yml
---
-storage_ip: "192.168.160.21/23"
-external_ip: "10.10.152.21/21"
-tenant_ip: "192.168.200.21/23"
+storage_ip: "192.0.2.91/23"
+external_ip: "198.51.100.23/21"
+tenant_ip: "203.0.113.77/23"
```
@@ -280,41 +327,70 @@
remote_user: root
tasks:
-- name: install needed network manager libs
- yum: name={{ item }} state=installed
- with_items:
- - libnm-qt-devel.x86_64
- - nm-connection-editor.x86_64
- - libsemanage-python
- - policycoreutils-python
+ - name: install needed network manager libs
+ yum:
+ name: '{{ item }}'
+ state: installed
+ with_items:
+ - NetworkManager-glib
+ - libnm-qt-devel.x86_64
+ - nm-connection-editor.x86_64
+ - libsemanage-python
+ - policycoreutils-python
##### Working with all cloud nodes - Teaming
- name: try nmcli add team - conn_name only & ip4 gw4
- nmcli: type=team conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} state=present
+ nmcli:
+ type: team
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
with_items:
- - "{{nmcli_team}}"
+ - '{{ nmcli_team }}'
- name: try nmcli add teams-slave
- nmcli: type=team-slave conn_name={{item.conn_name}} ifname={{item.ifname}} master={{item.master}} state=present
+ nmcli:
+ type: team-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
with_items:
- - "{{nmcli_team_slave}}"
+ - '{{ nmcli_team_slave }}'
###### Working with all cloud nodes - Bonding
# - name: try nmcli add bond - conn_name only & ip4 gw4 mode
-# nmcli: type=bond conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} mode={{item.mode}} state=present
+# nmcli:
+# type: bond
+# conn_name: '{{ item.conn_name }}'
+# ip4: '{{ item.ip4 }}'
+# gw4: '{{ item.gw4 }}'
+# mode: '{{ item.mode }}'
+# state: present
# with_items:
-# - "{{nmcli_bond}}"
+# - '{{ nmcli_bond }}'
#
# - name: try nmcli add bond-slave
-# nmcli: type=bond-slave conn_name={{item.conn_name}} ifname={{item.ifname}} master={{item.master}} state=present
+# nmcli:
+# type: bond-slave
+# conn_name: '{{ item.conn_name }}'
+# ifname: '{{ item.ifname }}'
+# master: '{{ item.master }}'
+# state: present
# with_items:
-# - "{{nmcli_bond_slave}}"
+# - '{{ nmcli_bond_slave }}'
##### Working with all cloud nodes - Ethernet
# - name: nmcli add Ethernet - conn_name only & ip4 gw4
-# nmcli: type=ethernet conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} state=present
+# nmcli:
+# type: ethernet
+# conn_name: '{{ item.conn_name }}'
+# ip4: '{{ item.ip4 }}'
+# gw4: '{{ item.gw4 }}'
+# state: present
# with_items:
-# - "{{nmcli_ethernet}}"
+# - '{{ nmcli_ethernet }}'
```
## playbook-del.yml example
@@ -326,41 +402,77 @@
tasks:
- name: try nmcli del team - multiple
- nmcli: conn_name={{item.conn_name}} state=absent
+ nmcli:
+ conn_name: '{{ item.conn_name }}'
+ state: absent
with_items:
- - { conn_name: 'em1'}
- - { conn_name: 'em2'}
- - { conn_name: 'p1p1'}
- - { conn_name: 'p1p2'}
- - { conn_name: 'p2p1'}
- - { conn_name: 'p2p2'}
- - { conn_name: 'tenant'}
- - { conn_name: 'storage'}
- - { conn_name: 'external'}
- - { conn_name: 'team-em1'}
- - { conn_name: 'team-em2'}
- - { conn_name: 'team-p1p1'}
- - { conn_name: 'team-p1p2'}
- - { conn_name: 'team-p2p1'}
- - { conn_name: 'team-p2p2'}
+ - conn_name: em1
+ - conn_name: em2
+ - conn_name: p1p1
+ - conn_name: p1p2
+ - conn_name: p2p1
+ - conn_name: p2p2
+ - conn_name: tenant
+ - conn_name: storage
+ - conn_name: external
+ - conn_name: team-em1
+ - conn_name: team-em2
+ - conn_name: team-p1p1
+ - conn_name: team-p1p2
+ - conn_name: team-p2p1
+ - conn_name: team-p2p2
```
# To add an Ethernet connection with static IP configuration, issue a command as follows
-- nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present
+- nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
# To add a Team connection with static IP configuration, issue a command as follows
-- nmcli: conn_name=my-team1 ifname=my-team1 type=team ip4=192.168.100.100/24 gw4=192.168.100.1 state=present autoconnect=yes
+- nmcli:
+ conn_name: my-team1
+ ifname: my-team1
+ type: team
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+ autoconnect: yes
# Optionally, at the same time specify IPv6 addresses for the device as follows:
-- nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 ip6=abbe::cafe gw6=2001:db8::1 state=present
+- nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ ip6: '2001:db8::cafe'
+ gw6: '2001:db8::1'
+ state: present
# To add two IPv4 DNS server addresses:
--nmcli: conn_name=my-eth1 dns4=["8.8.8.8", "8.8.4.4"] state=present
+- nmcli:
+ conn_name: my-eth1
+ dns4:
+ - 192.0.2.53
+ - 198.51.100.53
+ state: present
# To make a profile usable for all compatible Ethernet interfaces, issue a command as follows
-- nmcli: ctype=ethernet name=my-eth1 ifname="*" state=present
+- nmcli:
+ ctype: ethernet
+ name: my-eth1
+ ifname: '*'
+ state: present
# To change the property of a setting e.g. MTU, issue a command as follows:
-- nmcli: conn_name=my-eth1 mtu=9000 state=present
+- nmcli:
+ conn_name: my-eth1
+ mtu: 9000
+ type: ethernet
+ state: present
Exit statuses:
- nmcli exits with status 0 if it succeeds, a value greater than 0 is
@@ -380,8 +492,21 @@
# import ansible.module_utils.basic
import os
import sys
-import dbus
-from gi.repository import NetworkManager, NMClient
+HAVE_DBUS=False
+try:
+ import dbus
+ HAVE_DBUS=True
+except ImportError:
+ pass
+
+HAVE_NM_CLIENT=False
+try:
+ from gi.repository import NetworkManager, NMClient
+ HAVE_NM_CLIENT=True
+except ImportError:
+ pass
+
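+# Import failures are tolerated here; main() reports the missing dbus or
+# NetworkManager bindings via fail_json() instead of crashing at import time.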
+from ansible.module_utils.basic import AnsibleModule
class Nmcli(object):
@@ -479,7 +604,7 @@ def merge_secrets(self, proxy, config, setting_name):
for setting in secrets:
for key in secrets[setting]:
config[setting_name][key]=secrets[setting][key]
- except Exception, e:
+ except Exception as e:
pass
def dict_to_string(self, d):
@@ -490,13 +615,13 @@ def dict_to_string(self, d):
val=d[key]
str_val=""
add_string=True
- if type(val)==type(dbus.Array([])):
+ if isinstance(val, dbus.Array):
for elt in val:
- if type(elt)==type(dbus.Byte(1)):
+ if isinstance(elt, dbus.Byte):
str_val+="%s " % int(elt)
- elif type(elt)==type(dbus.String("")):
+ elif isinstance(elt, dbus.String):
str_val+="%s" % elt
- elif type(val)==type(dbus.Dictionary({})):
+ elif isinstance(val, dbus.Dictionary):
dstr+=self.dict_to_string(val)
add_string=False
else:
@@ -513,6 +638,12 @@ def connection_to_string(self, config):
return setting_list
# print ""
+ def bool_to_string(self, boolean):
+ if boolean:
+ return "yes"
+ else:
+ return "no"
+
def list_connection_info(self):
# Ask the settings service for the list of connections it provides
bus=dbus.SystemBus()
@@ -601,7 +732,7 @@ def create_connection_team(self):
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
- cmd.append(self.autoconnect)
+ cmd.append(self.bool_to_string(self.autoconnect))
return cmd
def modify_connection_team(self):
@@ -624,13 +755,13 @@ def modify_connection_team(self):
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
- cmd.append(self.gw4)
+ cmd.append(self.gw6)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.autoconnect is not None:
cmd.append('autoconnect')
- cmd.append(self.autoconnect)
+ cmd.append(self.bool_to_string(self.autoconnect))
# Can't use MTU with team
return cmd
@@ -703,7 +834,7 @@ def create_connection_bond(self):
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
- cmd.append(self.autoconnect)
+ cmd.append(self.bool_to_string(self.autoconnect))
if self.mode is not None:
cmd.append('mode')
cmd.append(self.mode)
@@ -744,13 +875,13 @@ def modify_connection_bond(self):
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
- cmd.append(self.gw4)
+ cmd.append(self.gw6)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.autoconnect is not None:
cmd.append('autoconnect')
- cmd.append(self.autoconnect)
+ cmd.append(self.bool_to_string(self.autoconnect))
return cmd
def create_connection_bond_slave(self):
@@ -789,8 +920,8 @@ def create_connection_ethernet(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
- # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present
- # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1
+ # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
+ # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
cmd.append('con')
cmd.append('add')
cmd.append('type')
@@ -819,15 +950,15 @@ def create_connection_ethernet(self):
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
- cmd.append(self.autoconnect)
+ cmd.append(self.bool_to_string(self.autoconnect))
return cmd
def modify_connection_ethernet(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
- # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present
- # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1
+ # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
+ # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
@@ -845,7 +976,7 @@ def modify_connection_ethernet(self):
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
- cmd.append(self.gw4)
+ cmd.append(self.gw6)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
@@ -854,7 +985,7 @@ def modify_connection_ethernet(self):
cmd.append(self.mtu)
if self.autoconnect is not None:
cmd.append('autoconnect')
- cmd.append(self.autoconnect)
+ cmd.append(self.bool_to_string(self.autoconnect))
return cmd
def create_connection_bridge(self):
@@ -963,7 +1094,7 @@ def main():
# Parsing argument file
module=AnsibleModule(
argument_spec=dict(
- autoconnect=dict(required=False, default=None, choices=['yes', 'no'], type='str'),
+ autoconnect=dict(required=False, default=None, type='bool'),
state=dict(required=True, choices=['present', 'absent'], type='str'),
conn_name=dict(required=True, type='str'),
master=dict(required=False, default=None, type='str'),
@@ -986,7 +1117,7 @@ def main():
mtu=dict(required=False, default=None, type='str'),
mac=dict(required=False, default=None, type='str'),
# bridge specific vars
- stp=dict(required=False, default='yes', choices=['yes', 'no'], type='str'),
+ stp=dict(required=False, default=True, type='bool'),
priority=dict(required=False, default="128", type='str'),
slavepriority=dict(required=False, default="32", type='str'),
forwarddelay=dict(required=False, default="15", type='str'),
@@ -1003,6 +1134,12 @@ def main():
supports_check_mode=True
)
+ if not HAVE_DBUS:
+ module.fail_json(msg="This module requires dbus python bindings")
+
+ if not HAVE_NM_CLIENT:
+ module.fail_json(msg="This module requires NetworkManager glib API")
+
nmcli=Nmcli(module)
rc=None
@@ -1057,7 +1194,5 @@ def main():
module.exit_json(**result)
-# import module snippets
-from ansible.module_utils.basic import *
-
-main()
+if __name__ == '__main__':
+ main()
diff --git a/network/openvswitch_bridge.py b/network/openvswitch_bridge.py
index 411b95b9dc1..9816e2bff3a 100644
--- a/network/openvswitch_bridge.py
+++ b/network/openvswitch_bridge.py
@@ -22,6 +22,10 @@
# pylint: disable=C0111
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: openvswitch_bridge
@@ -35,7 +39,19 @@
bridge:
required: true
description:
- - Name of bridge to manage
+ - Name of bridge or fake bridge to manage
+ parent:
+ version_added: "2.3"
+ required: false
+ default: None
+ description:
+ - Bridge parent of the fake bridge to manage
+ vlan:
+ version_added: "2.3"
+ required: false
+ default: None
+ description:
+ - The VLAN id of the fake bridge to manage (must be between 0 and 4095)
state:
required: false
default: "present"
@@ -65,13 +81,25 @@
EXAMPLES = '''
# Create a bridge named br-int
-- openvswitch_bridge: bridge=br-int state=present
+- openvswitch_bridge:
+ bridge: br-int
+ state: present
+
+# Create a fake bridge named br-int within br-parent on the VLAN 405
+- openvswitch_bridge:
+ bridge: br-int
+ parent: br-parent
+ vlan: 405
+ state: present
# Create an integration bridge
-- openvswitch_bridge: bridge=br-int state=present fail_mode=secure
+- openvswitch_bridge:
+ bridge: br-int
+ state: present
+ fail_mode: secure
args:
external_ids:
- bridge-id: "br-int"
+ bridge-id: br-int
'''
@@ -80,10 +108,18 @@ class OVSBridge(object):
def __init__(self, module):
self.module = module
self.bridge = module.params['bridge']
+ self.parent = module.params['parent']
+ self.vlan = module.params['vlan']
self.state = module.params['state']
self.timeout = module.params['timeout']
self.fail_mode = module.params['fail_mode']
+ if self.parent:
+ if self.vlan is None:
+ self.module.fail_json(msg='VLAN id must be set when parent is defined')
+ elif self.vlan < 0 or self.vlan > 4095:
+ self.module.fail_json(msg='Invalid VLAN ID (must be between 0 and 4095)')
+
def _vsctl(self, command):
'''Run ovs-vsctl command'''
return self.module.run_command(['ovs-vsctl', '-t',
@@ -100,7 +136,11 @@ def exists(self):
def add(self):
'''Create the bridge'''
- rtc, _, err = self._vsctl(['add-br', self.bridge])
+ if self.parent and self.vlan: # Add fake bridge
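+ # ovs-vsctl's three-argument form, "add-br <bridge> <parent> <vlan>",
+ # creates a fake bridge bound to an 802.1Q VLAN of the parent bridge.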
+ rtc, _, err = self._vsctl(['add-br', self.bridge, self.parent, str(self.vlan)])
+ else:
+ rtc, _, err = self._vsctl(['add-br', self.bridge])
+
if rtc != 0:
self.module.fail_json(msg=err)
if self.fail_mode:
@@ -143,7 +183,8 @@ def check(self):
changed = True
elif self.state == 'present' and not self.exists():
changed = True
- except Exception, earg:
+ except Exception:
+ earg = get_exception()
self.module.fail_json(msg=str(earg))
# pylint: enable=W0703
@@ -189,7 +230,8 @@ def run(self):
self.set_external_id(key, None)):
changed = True
- except Exception, earg:
+ except Exception:
+ earg = get_exception()
self.module.fail_json(msg=str(earg))
# pylint: enable=W0703
self.module.exit_json(changed=changed)
@@ -247,9 +289,11 @@ def main():
module = AnsibleModule(
argument_spec={
'bridge': {'required': True},
+ 'parent': {'default': None},
+ 'vlan': {'default': None, 'type': 'int'},
'state': {'default': 'present', 'choices': ['present', 'absent']},
'timeout': {'default': 5, 'type': 'int'},
- 'external_ids': {'default': None},
+ 'external_ids': {'default': None, 'type': 'dict'},
'fail_mode': {'default': None},
},
supports_check_mode=True,
@@ -267,4 +311,7 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/network/openvswitch_db.py b/network/openvswitch_db.py
index e6ec2658e0b..6d769e43672 100644
--- a/network/openvswitch_db.py
+++ b/network/openvswitch_db.py
@@ -23,6 +23,10 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: openvswitch_db
@@ -63,12 +67,20 @@
EXAMPLES = '''
# Increase the maximum idle time to 50 seconds before pruning unused kernel
# rules.
-- openvswitch_db: table=open_vswitch record=. col=other_config key=max-idle
- value=50000
+- openvswitch_db:
+ table: open_vswitch
+ record: .
+ col: other_config
+ key: max-idle
+ value: 50000
# Disable in band copy
-- openvswitch_db: table=Bridge record=br-int col=other_config
- key=disable-in-band value=true
+- openvswitch_db:
+ table: Bridge
+ record: br-int
+ col: other_config
+ key: disable-in-band
+ value: true
'''
@@ -129,4 +141,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/network/openvswitch_port.py b/network/openvswitch_port.py
index e98453fc95f..759a2489c16 100644
--- a/network/openvswitch_port.py
+++ b/network/openvswitch_port.py
@@ -22,6 +22,10 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: openvswitch_port
@@ -40,6 +44,11 @@
required: true
description:
- Name of port to manage on the bridge
+ tag:
+ version_added: 2.2
+ required: false
+ description:
+ - VLAN tag for this port
state:
required: false
default: "present"
@@ -67,21 +76,38 @@
EXAMPLES = '''
# Creates port eth2 on bridge br-ex
-- openvswitch_port: bridge=br-ex port=eth2 state=present
-
-# Creates port eth6 and set ofport equal to 6.
-- openvswitch_port: bridge=bridge-loop port=eth6 state=present
- set Interface eth6 ofport_request=6
-
-# Assign interface id server1-vifeth6 and mac address 52:54:00:30:6d:11
+- openvswitch_port:
+ bridge: br-ex
+ port: eth2
+ state: present
+
+# Creates port eth6
+- openvswitch_port:
+ bridge: bridge-loop
+ port: eth6
+ state: present
+ set: Interface eth6
+
+# Creates port vlan10 with tag 10 on bridge br-ex
+- openvswitch_port:
+ bridge: br-ex
+ port: vlan10
+ tag: 10
+ state: present
+ set: Interface vlan10
+
+# Assign interface id server1-vifeth6 and mac address 00:00:5E:00:53:23
# to port vifeth6 and setup port to be managed by a controller.
-- openvswitch_port: bridge=br-int port=vifeth6 state=present
+- openvswitch_port:
+ bridge: br-int
+ port: vifeth6
+ state: present
args:
external_ids:
- iface-id: "{{inventory_hostname}}-vifeth6"
- attached-mac: "52:54:00:30:6d:11"
- vm-id: "{{inventory_hostname}}"
- iface-status: "active"
+ iface-id: '{{ inventory_hostname }}-vifeth6'
+ attached-mac: '00:00:5E:00:53:23'
+ vm-id: '{{ inventory_hostname }}'
+ iface-status: active
'''
# pylint: disable=W0703
@@ -118,6 +144,7 @@ def __init__(self, module):
self.module = module
self.bridge = module.params['bridge']
self.port = module.params['port']
+ self.tag = module.params['tag']
self.state = module.params['state']
self.timeout = module.params['timeout']
self.set_opt = module.params.get('set', None)
@@ -136,11 +163,11 @@ def exists(self):
if rtc != 0:
self.module.fail_json(msg=err)
- return any(port.rstrip() == self.port for port in out.split('\n'))
+ return any(port.rstrip() == self.port for port in out.split('\n')) or self.port == self.bridge
def set(self, set_opt):
""" Set attributes on a port. """
- self.module("set called %s" % set_opt)
+ self.module.log("set called %s" % set_opt)
if (not set_opt):
return False
@@ -167,6 +194,8 @@ def set(self, set_opt):
def add(self):
'''Add the port'''
cmd = ['add-port', self.bridge, self.port]
+ if self.tag:
+ cmd += ["tag=" + self.tag]
if self.set and self.set_opt:
cmd += ["--", "set"]
cmd += self.set_opt.split(" ")
@@ -192,7 +221,8 @@ def check(self):
changed = True
else:
changed = False
- except Exception, earg:
+ except Exception:
+ earg = get_exception()
self.module.fail_json(msg=str(earg))
self.module.exit_json(changed=changed)
@@ -223,7 +253,8 @@ def run(self):
external_id = fmt_opt % (self.port, key, value)
changed = self.set(external_id) or changed
##
- except Exception, earg:
+ except Exception:
+ earg = get_exception()
self.module.fail_json(msg=str(earg))
self.module.exit_json(changed=changed)
@@ -235,10 +266,11 @@ def main():
argument_spec={
'bridge': {'required': True},
'port': {'required': True},
+ 'tag': {'required': False},
'state': {'default': 'present', 'choices': ['present', 'absent']},
'timeout': {'default': 5, 'type': 'int'},
'set': {'required': False, 'default': None},
- 'external_ids': {'default': {}, 'required': False},
+ 'external_ids': {'default': {}, 'required': False, 'type': 'dict'},
},
supports_check_mode=True,
)
@@ -256,4 +288,7 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/network/panos/__init__.py b/network/panos/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/network/panos/panos_admin.py b/network/panos/panos_admin.py
new file mode 100755
index 00000000000..dd36ac08977
--- /dev/null
+++ b/network/panos/panos_admin.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Ansible module to manage PaloAltoNetworks Firewall
+# (c) 2016, techbizdev
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: panos_admin
+short_description: Add or modify PAN-OS user accounts password.
+description:
+ - PanOS module that allows changes to the user account passwords by doing
+ API calls to the Firewall using pan-api as the protocol.
+author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
+version_added: "2.3"
+requirements:
+ - pan-python
+options:
+ ip_address:
+ description:
+ - IP address (or hostname) of PAN-OS device
+ required: true
+ password:
+ description:
+ - password for authentication
+ required: true
+ username:
+ description:
+ - username for authentication
+ required: false
+ default: "admin"
+ admin_username:
+ description:
+ - username for admin user
+ required: false
+ default: "admin"
+ admin_password:
+ description:
+ - password for admin user
+ required: true
+ role:
+ description:
+ - role for admin user
+ required: false
+ default: null
+ commit:
+ description:
+ - commit if changed
+ required: false
+ default: true
+'''
+
+EXAMPLES = '''
+# Set the password of user admin to "badpassword"
+# Doesn't commit the candidate config
+ - name: set admin password
+ panos_admin:
+ ip_address: "192.168.1.1"
+ password: "admin"
+ admin_username: admin
+ admin_password: "badpassword"
+ commit: False
+'''
+
+RETURN = '''
+status:
+ description: success status
+ returned: success
+ type: string
+ sample: "okey dokey"
+'''
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ import pan.xapi
+ HAS_LIB = True
+except ImportError:
+ HAS_LIB = False
+
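+# XPath of a single administrator account entry in the PAN-OS management
+# config; the %s placeholder is filled with the admin username.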
+_ADMIN_XPATH = "/config/mgt-config/users/entry[@name='%s']"
+
+
+def admin_exists(xapi, admin_username):
+ xapi.get(_ADMIN_XPATH % admin_username)
+ e = xapi.element_root.find('.//entry')
+ return e
+
+
+def admin_set(xapi, module, admin_username, admin_password, role):
+ if admin_password is not None:
+ xapi.op(cmd='request password-hash password "%s"' % admin_password,
+ cmd_xml=True)
+ r = xapi.element_root
+ phash = r.find('.//phash').text
+ if role is not None:
+ rbval = "yes"
+ if role != "superuser" and role != 'superreader':
+ rbval = ""
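+ # superuser/superreader are flag-style roles stored as <role>yes</role>;
+ # any other role name is written as an empty element.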
+
+ ea = admin_exists(xapi, admin_username)
+ if ea is not None:
+ # user exists
+ changed = False
+
+ if role is not None:
+ rb = ea.find('.//role-based')
+ if rb is not None:
+ if rb[0].tag != role:
+ changed = True
+ xpath = _ADMIN_XPATH % admin_username
+ xpath += '/permissions/role-based/%s' % rb[0].tag
+ xapi.delete(xpath=xpath)
+
+ xpath = _ADMIN_XPATH % admin_username
+ xpath += '/permissions/role-based'
+ xapi.set(xpath=xpath,
+ element='<%s>%s</%s>' % (role, rbval, role))
+
+ if admin_password is not None:
+ xapi.edit(xpath=_ADMIN_XPATH % admin_username+'/phash',
+ element='<phash>%s</phash>' % phash)
+ changed = True
+
+ return changed
+
+ # setup the non encrypted part of the monitor
+ exml = []
+
+ exml.append('<phash>%s</phash>' % phash)
+ exml.append('<permissions><role-based><%s>%s</%s>'
+ '</role-based></permissions>' % (role, rbval, role))
+
+ exml = ''.join(exml)
+ # module.fail_json(msg=exml)
+
+ xapi.set(xpath=_ADMIN_XPATH % admin_username, element=exml)
+
+ return True
+
+
+def main():
+ argument_spec = dict(
+ ip_address=dict(),
+ password=dict(no_log=True),
+ username=dict(default='admin'),
+ admin_username=dict(default='admin'),
+ admin_password=dict(no_log=True),
+ role=dict(),
+ commit=dict(type='bool', default=True)
+ )
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_LIB:
+ module.fail_json(msg='pan-python required for this module')
+
+ ip_address = module.params["ip_address"]
+ if not ip_address:
+ module.fail_json(msg="ip_address should be specified")
+ password = module.params["password"]
+ if not password:
+ module.fail_json(msg="password is required")
+ username = module.params['username']
+
+ xapi = pan.xapi.PanXapi(
+ hostname=ip_address,
+ api_username=username,
+ api_password=password
+ )
+
+ admin_username = module.params['admin_username']
+ if admin_username is None:
+ module.fail_json(msg="admin_username is required")
+ admin_password = module.params['admin_password']
+ role = module.params['role']
+ commit = module.params['commit']
+
+ changed = admin_set(xapi, module, admin_username, admin_password, role)
+
+ if changed and commit:
+ xapi.commit(cmd="", sync=True, interval=1)
+
+ module.exit_json(changed=changed, msg="okey dokey")
+
+if __name__ == '__main__':
+ main()
diff --git a/network/snmp_facts.py b/network/snmp_facts.py
index 81a91ee6eb2..7801d0f2955 100644
--- a/network/snmp_facts.py
+++ b/network/snmp_facts.py
@@ -16,12 +16,16 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: snmp_facts
version_added: "1.9"
author: "Patrick Ogenstad (@ogenstad)"
-short_description: Retrive facts for a device using SNMP.
+short_description: Retrieve facts for a device using SNMP.
description:
- Retrieve facts for a device using SNMP, the facts will be
inserted to the ansible_facts key.
@@ -72,19 +76,22 @@
EXAMPLES = '''
# Gather facts with SNMP version 2
-- snmp_facts: host={{ inventory_hostname }} version=2c community=public
- connection: local
+- snmp_facts:
+ host: '{{ inventory_hostname }}'
+ version: 2c
+ community: public
+ delegate_to: localhost
# Gather facts using SNMP version 3
- snmp_facts:
- host={{ inventory_hostname }}
- version=v3
- level=authPriv
- integrity=sha
- privacy=aes
- username=snmp-user
- authkey=abc12345
- privkey=def6789
+ host: '{{ inventory_hostname }}'
+ version: v3
+ level: authPriv
+ integrity: sha
+ privacy: aes
+ username: snmp-user
+ authkey: abc12345
+ privkey: def6789
delegate_to: localhost
'''
@@ -112,7 +119,7 @@ def __init__(self,dotprefix=False):
self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
self.sysName = dp + "1.3.6.1.2.1.1.5.0"
self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"
-
+
# From IF-MIB
self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
@@ -127,10 +134,10 @@ def __init__(self,dotprefix=False):
self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
-
+
def decode_hex(hexstring):
-
+
if len(hexstring) < 3:
return hexstring
if hexstring[:2] == "0x":
@@ -153,7 +160,7 @@ def lookup_adminstatus(int_adminstatus):
2: 'down',
3: 'testing'
}
- if int_adminstatus in adminstatus_options.keys():
+ if int_adminstatus in adminstatus_options:
return adminstatus_options[int_adminstatus]
else:
return ""
@@ -168,7 +175,7 @@ def lookup_operstatus(int_operstatus):
6: 'notPresent',
7: 'lowerLayerDown'
}
- if int_operstatus in operstatus_options.keys():
+ if int_operstatus in operstatus_options:
return operstatus_options[int_operstatus]
else:
return ""
@@ -200,7 +207,7 @@ def main():
if m_args['version'] == "v2" or m_args['version'] == "v2c":
if m_args['community'] == False:
module.fail_json(msg='Community not set when using snmp version 2')
-
+
if m_args['version'] == "v3":
if m_args['username'] == None:
module.fail_json(msg='Username not set when using snmp version 3')
@@ -208,7 +215,7 @@ def main():
if m_args['level'] == "authPriv" and m_args['privacy'] == None:
module.fail_json(msg='Privacy algorithm not set when using authPriv')
-
+
if m_args['integrity'] == "sha":
integrity_proto = cmdgen.usmHMACSHAAuthProtocol
elif m_args['integrity'] == "md5":
@@ -218,7 +225,7 @@ def main():
privacy_proto = cmdgen.usmAesCfb128Protocol
elif m_args['privacy'] == "des":
privacy_proto = cmdgen.usmDESPrivProtocol
-
+
# Use SNMP Version 2
if m_args['version'] == "v2" or m_args['version'] == "v2c":
snmp_auth = cmdgen.CommunityData(m_args['community'])
@@ -237,18 +244,19 @@ def main():
v = DefineOid(dotprefix=False)
Tree = lambda: defaultdict(Tree)
-
+
results = Tree()
-
+
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
snmp_auth,
cmdgen.UdpTransportTarget((m_args['host'], 161)),
cmdgen.MibVariable(p.sysDescr,),
- cmdgen.MibVariable(p.sysObjectId,),
+ cmdgen.MibVariable(p.sysObjectId,),
cmdgen.MibVariable(p.sysUpTime,),
- cmdgen.MibVariable(p.sysContact,),
+ cmdgen.MibVariable(p.sysContact,),
cmdgen.MibVariable(p.sysName,),
cmdgen.MibVariable(p.sysLocation,),
+ lookupMib=False
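+ # lookupMib=False makes pysnmp return raw OIDs and values rather than
+ # resolving them against compiled MIB modules.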
)
@@ -273,7 +281,7 @@ def main():
errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
snmp_auth,
- cmdgen.UdpTransportTarget((m_args['host'], 161)),
+ cmdgen.UdpTransportTarget((m_args['host'], 161)),
cmdgen.MibVariable(p.ifIndex,),
cmdgen.MibVariable(p.ifDescr,),
cmdgen.MibVariable(p.ifMtu,),
@@ -281,20 +289,21 @@ def main():
cmdgen.MibVariable(p.ifPhysAddress,),
cmdgen.MibVariable(p.ifAdminStatus,),
cmdgen.MibVariable(p.ifOperStatus,),
- cmdgen.MibVariable(p.ipAdEntAddr,),
- cmdgen.MibVariable(p.ipAdEntIfIndex,),
- cmdgen.MibVariable(p.ipAdEntNetMask,),
+ cmdgen.MibVariable(p.ipAdEntAddr,),
+ cmdgen.MibVariable(p.ipAdEntIfIndex,),
+ cmdgen.MibVariable(p.ipAdEntNetMask,),
cmdgen.MibVariable(p.ifAlias,),
+ lookupMib=False
)
-
+
if errorIndication:
module.fail_json(msg=str(errorIndication))
interface_indexes = []
-
- all_ipv4_addresses = []
+
+ all_ipv4_addresses = []
ipv4_networks = Tree()
for varBinds in varTable:
@@ -358,9 +367,9 @@ def main():
results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]
results['ansible_all_ipv4_addresses'] = all_ipv4_addresses
-
+
module.exit_json(ansible_facts=results)
-
-main()
+if __name__ == '__main__':
+ main()
diff --git a/network/wakeonlan.py b/network/wakeonlan.py
new file mode 100644
index 00000000000..d49118d60ba
--- /dev/null
+++ b/network/wakeonlan.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Dag Wieers
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: wakeonlan
+version_added: 2.2
+short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
+description:
+ - The M(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
+options:
+ mac:
+ description:
+ - MAC address to send Wake-on-LAN broadcast packet for
+ required: true
+ default: null
+ broadcast:
+ description:
+ - Network broadcast address to use for broadcasting magic Wake-on-LAN packet
+ required: false
+ default: 255.255.255.255
+ port:
+ description:
+ - UDP port to use for magic Wake-on-LAN packet
+ required: false
+ default: 7
+author: "Dag Wieers (@dagwieers)"
+todo:
+ - Add arping support to check whether the system is up (before and after)
+ - Enable check-mode support (when we have arping support)
+ - Does not have SecureOn password support
+notes:
+ - This module sends a magic packet, without knowing whether it worked
+ - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS)
+ - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first) when turned off
+'''
+
+EXAMPLES = '''
+# Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
+- wakeonlan:
+ mac: '00:00:5E:00:53:66'
+ broadcast: 192.0.2.23
+ delegate_to: localhost
+
+- wakeonlan:
+ mac: 00:00:5E:00:53:66
+ port: 9
+ delegate_to: localhost
+'''
+
+RETURN='''
+# Default return values
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+import socket
+import struct
+
+
+def wakeonlan(module, mac, broadcast, port):
+ """ Send a magic Wake-on-LAN packet. """
+
+ mac_orig = mac
+
+ # Remove possible separator from MAC address
+ if len(mac) == 12 + 5:
+ mac = mac.replace(mac[2], '')
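+ # mac[2] is the separator character (e.g. ':' in a 17-character MAC);
+ # replace() strips every occurrence of it.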
+
+ # If we don't end up with 12 hexadecimal characters, fail
+ if len(mac) != 12:
+ module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig)
+
+ # Test if it converts to an integer, otherwise fail
+ try:
+ int(mac, 16)
+ except ValueError:
+ module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)
+
+ # Create payload for magic packet
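+ # A magic packet is six 0xFF bytes followed by the target MAC repeated
+ # at least 16 times; 20 copies are built below, which is still valid.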
+ data = ''
+ padding = ''.join(['FFFFFFFFFFFF', mac * 20])
+ for i in range(0, len(padding), 2):
+ data = ''.join([data, struct.pack('B', int(padding[i: i + 2], 16))])
+
+ # Broadcast payload to network
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+ try:
+ sock.sendto(data, (broadcast, port))
+ except socket.error:
+ e = get_exception()
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ mac = dict(required=True, type='str'),
+ broadcast = dict(required=False, default='255.255.255.255'),
+ port = dict(required=False, type='int', default=7),
+ ),
+ )
+
+ mac = module.params.get('mac')
+ broadcast = module.params.get('broadcast')
+ port = module.params.get('port')
+
+ wakeonlan(module, mac, broadcast, port)
+ module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/notification/campfire.py b/notification/campfire.py
index 68e64f1bc94..8a7b44355f4 100644
--- a/notification/campfire.py
+++ b/notification/campfire.py
@@ -15,6 +15,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: campfire
@@ -23,7 +27,6 @@
description:
- Send a message to Campfire.
- Messages with newlines will result in a "Paste" message being sent.
-version_added: "1.2"
options:
subscription:
description:
@@ -61,10 +64,18 @@
'''
EXAMPLES = '''
-- campfire: subscription=foo token=12345 room=123 msg="Task completed."
+- campfire:
+ subscription: foo
+ token: 12345
+ room: 123
+ msg: Task completed.
-- campfire: subscription=foo token=12345 room=123 notify=loggins
- msg="Task completed ... with feeling."
+- campfire:
+ subscription: foo
+ token: 12345
+ room: 123
+ notify: loggins
+ msg: Task completed ... with feeling.
'''
import cgi
@@ -74,7 +85,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
subscription=dict(required=True),
- token=dict(required=True),
+ token=dict(required=True, no_log=True),
room=dict(required=True),
msg=dict(required=True),
notify=dict(required=False,
@@ -118,14 +129,14 @@ def main():
# Send some audible notification if requested
if notify:
response, info = fetch_url(module, target_url, data=NSTR % cgi.escape(notify), headers=headers)
- if info['status'] != 200:
- module.fail_json(msg="unable to send msg: '%s', campfire api"
- " returned error code: '%s'" %
- (notify, info['status']))
+ if info['status'] not in [200, 201]:
+ module.fail_json(msg="unable to send msg: '%s', campfire api"
+ " returned error code: '%s'" %
+ (notify, info['status']))
# Send the message
response, info = fetch_url(module, target_url, data=MSTR %cgi.escape(msg), headers=headers)
- if info['status'] != 200:
+ if info['status'] not in [200, 201]:
module.fail_json(msg="unable to send msg: '%s', campfire api"
" returned error code: '%s'" %
(msg, info['status']))
diff --git a/notification/flowdock.py b/notification/flowdock.py
index 34dad8db375..e0584295afa 100644
--- a/notification/flowdock.py
+++ b/notification/flowdock.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: flowdock
@@ -89,18 +93,20 @@
'''
EXAMPLES = '''
-- flowdock: type=inbox
- token=AAAAAA
- from_address=user@example.com
- source='my cool app'
- msg='test from ansible'
- subject='test subject'
-
-- flowdock: type=chat
- token=AAAAAA
- external_user_name=testuser
- msg='test from ansible'
- tags=tag1,tag2,tag3
+- flowdock:
+ type: inbox
+ token: AAAAAA
+ from_address: user@example.com
+ source: my cool app
+ msg: test from ansible
+ subject: test subject
+
+- flowdock:
+ type: chat
+ token: AAAAAA
+ external_user_name: testuser
+ msg: test from ansible
+ tags: tag1,tag2,tag3
'''
import urllib
@@ -113,7 +119,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- token=dict(required=True),
+ token=dict(required=True, no_log=True),
msg=dict(required=True),
type=dict(required=True, choices=["inbox","chat"]),
external_user_name=dict(required=False),
@@ -189,5 +195,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
-main()
-
+if __name__ == '__main__':
+ main()
diff --git a/notification/grove.py b/notification/grove.py
index 4e4a0b5b684..fe16289a220 100644
--- a/notification/grove.py
+++ b/notification/grove.py
@@ -1,5 +1,24 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -76,7 +95,7 @@ def do_notify_grove(module, channel_token, service, message, url=None, icon_url=
def main():
module = AnsibleModule(
argument_spec = dict(
- channel_token = dict(type='str', required=True),
+ channel_token = dict(type='str', required=True, no_log=True),
message = dict(type='str', required=True),
service = dict(type='str', default='ansible'),
url = dict(type='str', default=None),
@@ -99,4 +118,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/notification/hall.py b/notification/hall.py
index 05c1a981b73..d8766412d01 100755
--- a/notification/hall.py
+++ b/notification/hall.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
module: hall
short_description: Send notification to Hall
@@ -60,7 +64,7 @@
room_token:
title: Server Creation
msg: "Created EC2 instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region."
- with_items: ec2.instances
+ with_items: "{{ ec2.instances }}"
"""
HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s'
@@ -94,4 +98,6 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/notification/hipchat.py b/notification/hipchat.py
index f565ca9cdfc..f321a6b9141 100644
--- a/notification/hipchat.py
+++ b/notification/hipchat.py
@@ -15,6 +15,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: hipchat
@@ -81,15 +85,16 @@
'''
EXAMPLES = '''
-- hipchat: room=notify msg="Ansible task finished"
+- hipchat:
+ room: notify
+ msg: Ansible task finished
# Use Hipchat API version 2
-
- hipchat:
- api: "https://api.hipchat.com/v2/"
+ api: 'https://api.hipchat.com/v2/'
token: OAUTH2_TOKEN
room: notify
- msg: "Ansible task finished"
+ msg: Ansible task finished
'''
# ===========================================
@@ -97,6 +102,15 @@
#
import urllib
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import fetch_url
DEFAULT_URI = "https://api.hipchat.com/v1"
@@ -104,10 +118,10 @@
NOTIFY_URI_V2 = "/room/{id_or_name}/notification"
+
def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
- color='yellow', notify=False, api=MSG_URI_V1):
+ color='yellow', notify=False, api=MSG_URI_V1):
'''sending message to hipchat v1 server'''
- print "Sending message to v1 server"
params = {}
params['room_id'] = room
@@ -133,11 +147,10 @@ def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
- color='yellow', notify=False, api=NOTIFY_URI_V2):
+ color='yellow', notify=False, api=NOTIFY_URI_V2):
'''sending message to hipchat v2 server'''
- print "Sending message to v2 server"
- headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'}
+ headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
body = dict()
body['message'] = msg
@@ -147,7 +160,7 @@ def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
POST_URL = api + NOTIFY_URI_V2
- url = POST_URL.replace('{id_or_name}', room)
+ url = POST_URL.replace('{id_or_name}', urllib.pathname2url(room))
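+ # pathname2url() percent-encodes the room name so that names containing
+ # spaces or other special characters still form a valid URL path.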
data = json.dumps(body)
if module.check_mode:
@@ -155,7 +168,10 @@ def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
module.exit_json(changed=False)
response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
- if info['status'] == 200:
+
+ # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows
+ # 204 to be the expected result code.
+ if info['status'] in [200, 204]:
return response.read()
else:
module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
@@ -169,7 +185,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- token=dict(required=True),
+ token=dict(required=True, no_log=True),
room=dict(required=True),
msg=dict(required=True),
msg_from=dict(default="Ansible", aliases=['from']),
@@ -184,7 +200,7 @@ def main():
)
token = module.params["token"]
- room = module.params["room"]
+ room = str(module.params["room"])
msg = module.params["msg"]
msg_from = module.params["msg_from"]
color = module.params["color"]
@@ -197,14 +213,12 @@ def main():
send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
else:
send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg="unable to send msg: %s" % e)
changed = True
module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-
-main()
+if __name__ == '__main__':
+ main()
diff --git a/notification/irc.py b/notification/irc.py
index 28ad4417ac1..d2fa22a4f52 100644
--- a/notification/irc.py
+++ b/notification/irc.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: irc
@@ -56,9 +60,11 @@
color:
description:
- Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none").
+ Added 11 more colors in version 2.0.
required: false
default: "none"
- choices: [ "none", "yellow", "red", "green", "blue", "black" ]
+ choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray", "light_gray"]
channel:
description:
- Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
@@ -95,6 +101,13 @@
Useful for when using a faux bot and not wanting join/parts between messages.
default: True
version_added: "2.0"
+ style:
+ description:
+ - Text style for the message. Note italic does not work on some clients
+ default: None
+ required: False
+ choices: [ "bold", "underline", "reverse", "italic" ]
+ version_added: "2.0"
# informational: requirements for nodes
requirements: [ socket ]
@@ -104,22 +117,29 @@
'''
EXAMPLES = '''
-- irc: server=irc.example.net channel="#t1" msg="Hello world"
+- irc:
+ server: irc.example.net
+ channel: "#t1"
+ msg: "Hello world"
-- local_action: irc port=6669
- server="irc.example.net"
- channel="#t1"
- msg="All finished at {{ ansible_date_time.iso8601 }}"
- color=red
- nick=ansibleIRC
+- local_action:
+ module: irc
+ port: 6669
+ server: "irc.example.net"
+ channel: "#t1"
+ msg: "All finished at {{ ansible_date_time.iso8601 }}"
+ color: red
+ nick: ansibleIRC
-- local_action: irc port=6669
- server="irc.example.net"
- channel="#t1"
- nick_to=["nick1", "nick2"]
- msg="All finished at {{ ansible_date_time.iso8601 }}"
- color=red
- nick=ansibleIRC
+- local_action:
+ module: irc
+ port: 6669
+ server: "irc.example.net"
+ channel: "#t1"
+ nick_to: ["nick1", "nick2"]
+ msg: "All finished at {{ ansible_date_time.iso8601 }}"
+ color: red
+ nick: ansibleIRC
'''
# ===========================================
@@ -134,24 +154,47 @@
def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=[], key=None, topic=None,
- nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True):
+ nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None):
'''send message to IRC'''
colornumbers = {
+ 'white': "00",
'black': "01",
+ 'blue': "02",
+ 'green': "03",
'red': "04",
- 'green': "09",
+ 'brown': "05",
+ 'purple': "06",
+ 'orange': "07",
'yellow': "08",
- 'blue': "12",
+ 'light_green': "09",
+ 'teal': "10",
+ 'light_cyan': "11",
+ 'light_blue': "12",
+ 'pink': "13",
+ 'gray': "14",
+ 'light_gray': "15",
+ }
+
+ stylechoices = {
+ 'bold': "\x02",
+ 'underline': "\x1F",
+ 'reverse': "\x16",
+ 'italic': "\x1D",
}
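+ # These are mIRC-style control codes: \x03 plus a two-digit number
+ # selects a color, while \x02, \x1F, \x16 and \x1D toggle bold,
+ # underline, reverse video and italics respectively.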
+ try:
+ styletext = stylechoices[style]
+ except:
+ styletext = ""
+
try:
colornumber = colornumbers[color]
colortext = "\x03" + colornumber
except:
colortext = ""
- message = colortext + msg
+ message = styletext + colortext + msg
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if use_ssl:
@@ -215,16 +258,21 @@ def main():
module = AnsibleModule(
argument_spec=dict(
server=dict(default='localhost'),
- port=dict(default=6667),
+ port=dict(type='int', default=6667),
nick=dict(default='ansible'),
nick_to=dict(required=False, type='list'),
msg=dict(required=True),
- color=dict(default="none", choices=["yellow", "red", "green",
- "blue", "black", "none"]),
+ color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
+ "green", "red", "brown",
+ "purple", "orange", "yellow",
+ "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray",
+ "light_gray", "none"]),
+ style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
channel=dict(required=False),
- key=dict(),
+ key=dict(no_log=True),
topic=dict(),
- passwd=dict(),
+ passwd=dict(no_log=True),
timeout=dict(type='int', default=30),
part=dict(type='bool', default=True),
use_ssl=dict(type='bool', default=False)
@@ -248,10 +296,12 @@ def main():
timeout = module.params["timeout"]
use_ssl = module.params["use_ssl"]
part = module.params["part"]
+ style = module.params["style"]
try:
- send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part)
- except Exception, e:
+ send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style)
+ except Exception:
+ e = get_exception()
module.fail_json(msg="unable to send to IRC: %s" % e)
module.exit_json(changed=False, channel=channel, nick=nick,
@@ -259,4 +309,7 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/notification/jabber.py b/notification/jabber.py
index 6d97e4232df..f68790fb296 100644
--- a/notification/jabber.py
+++ b/notification/jabber.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
version_added: "1.2"
@@ -66,24 +70,27 @@
EXAMPLES = '''
# send a message to a user
-- jabber: user=mybot@example.net
- password=secret
- to=friend@example.net
- msg="Ansible task finished"
+- jabber:
+ user: mybot@example.net
+ password: secret
+ to: friend@example.net
+ msg: Ansible task finished
# send a message to a room
-- jabber: user=mybot@example.net
- password=secret
- to=mychaps@conference.example.net/ansiblebot
- msg="Ansible task finished"
+- jabber:
+ user: mybot@example.net
+ password: secret
+ to: mychaps@conference.example.net/ansiblebot
+ msg: Ansible task finished
# send a message, specifying the host and port
-- jabber user=mybot@example.net
- host=talk.example.net
- port=5223
- password=secret
- to=mychaps@example.net
- msg="Ansible task finished"
+- jabber:
+ user: mybot@example.net
+ host: talk.example.net
+ port: 5223
+ password: secret
+ to: mychaps@example.net
+ msg: Ansible task finished
'''
import os
@@ -101,7 +108,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(required=True),
- password=dict(required=True),
+ password=dict(required=True, no_log=True),
to=dict(required=True),
msg=dict(required=True),
host=dict(required=False),
@@ -134,7 +141,7 @@ def main():
msg = xmpp.protocol.Message(body=module.params['msg'])
try:
- conn=xmpp.Client(server)
+ conn=xmpp.Client(server, debug=[])
if not conn.connect(server=(host,port)):
module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
if not conn.auth(user,password,'Ansible'):
@@ -155,11 +162,15 @@ def main():
conn.send(msg)
time.sleep(1)
conn.disconnect()
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg="unable to send msg: %s" % e)
module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
# import module snippets
from ansible.module_utils.basic import *
-main()
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/notification/mail.py b/notification/mail.py
index 8be9a589cbf..51902f3f87f 100644
--- a/notification/mail.py
+++ b/notification/mail.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
author: "Dag Wieers (@dagwieers)"
@@ -120,37 +124,42 @@
EXAMPLES = '''
# Example playbook sending mail to root
-- local_action: mail subject='System {{ ansible_hostname }} has been successfully provisioned.'
+- mail:
+ subject: 'System {{ ansible_hostname }} has been successfully provisioned.'
+ delegate_to: localhost
# Sending an e-mail using Gmail SMTP servers
-- local_action: mail
- host='smtp.gmail.com'
- port=587
- username=username@gmail.com
- password='mysecret'
- to="John Smith "
- subject='Ansible-report'
- body='System {{ ansible_hostname }} has been successfully provisioned.'
+- mail:
+ host: smtp.gmail.com
+ port: 587
+ username: username@gmail.com
+ password: mysecret
+ to: John Smith
+ subject: Ansible-report
+ body: 'System {{ ansible_hostname }} has been successfully provisioned.'
+ delegate_to: localhost
# Send e-mail to a bunch of users, attaching files
-- local_action: mail
- host='127.0.0.1'
- port=2025
- subject="Ansible-report"
- body="Hello, this is an e-mail. I hope you like it ;-)"
- from="jane@example.net (Jane Jolie)"
- to="John Doe , Suzie Something "
- cc="Charlie Root "
- attach="/etc/group /tmp/pavatar2.png"
- headers=Reply-To=john@example.com|X-Special="Something or other"
- charset=utf8
+- mail:
+ host: 127.0.0.1
+ port: 2025
+ subject: Ansible-report
+ body: Hello, this is an e-mail. I hope you like it ;-)
+ from: jane@example.net (Jane Jolie)
+ to: John Doe , Suzie Something
+ cc: Charlie Root
+ attach: /etc/group /tmp/pavatar2.png
+ headers: 'Reply-To=john@example.com|X-Special="Something or other"'
+ charset: utf8
+ delegate_to: localhost
+
# Sending an e-mail using the remote machine, not the Ansible controller node
- mail:
- host='localhost'
- port=25
- to="John Smith "
- subject='Ansible-report'
- body='System {{ ansible_hostname }} has been successfully provisioned.'
+ host: localhost
+ port: 25
+ to: John Smith
+ subject: Ansible-report
+ body: 'System {{ ansible_hostname }} has been successfully provisioned.'
'''
import os
@@ -178,7 +187,7 @@ def main():
module = AnsibleModule(
argument_spec = dict(
username = dict(default=None),
- password = dict(default=None),
+ password = dict(default=None, no_log=True),
host = dict(default='localhost'),
port = dict(default='25'),
sender = dict(default='root', aliases=['from']),
@@ -218,7 +227,8 @@ def main():
smtp = smtplib.SMTP_SSL(host, port=int(port))
except (smtplib.SMTPException, ssl.SSLError):
smtp = smtplib.SMTP(host, port=int(port))
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(rc=1, msg='Failed to send mail to server %s on port %s: %s' % (host, port, e))
smtp.ehlo()
@@ -283,15 +293,16 @@ def main():
part.add_header('Content-disposition', 'attachment', filename=os.path.basename(file))
msg.attach(part)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(rc=1, msg="Failed to send mail: can't attach file %s: %s" % (file, e))
- sys.exit()
composed = msg.as_string()
try:
smtp.sendmail(sender_addr, set(addr_list), composed)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(rc=1, msg='Failed to send mail to %s: %s' % (", ".join(addr_list), e))
smtp.quit()
@@ -300,4 +311,7 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/notification/mqtt.py b/notification/mqtt.py
index c618ab69ae3..b13124b4f01 100644
--- a/notification/mqtt.py
+++ b/notification/mqtt.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: mqtt
@@ -75,6 +79,36 @@
retained message immediately.
required: false
default: False
+ ca_certs:
+ description:
+ - The path to the Certificate Authority certificate files that are to be
+ treated as trusted by this client. If this is the only option given
+ then the client will operate in a similar manner to a web browser. That
+ is to say it will require the broker to have a certificate signed by the
+ Certificate Authorities in ca_certs and will communicate using TLS v1,
+ but will not attempt any form of authentication. This provides basic
+ network encryption but may not be sufficient depending on how the broker
+ is configured.
+ required: False
+ default: None
+ version_added: 2.3
+ certfile:
+ description:
+ - The path pointing to the PEM encoded client certificate. If this is not
+ None it will be used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
+ required: False
+ default: None
+ version_added: 2.3
+ keyfile:
+ description:
+ - The path pointing to the PEM encoded client private key. If this is not
+ None it will be used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
+ required: False
+ default: None
+ version_added: 2.3
+
# informational: requirements for nodes
requirements: [ mosquitto ]
@@ -113,14 +147,17 @@ def main():
module = AnsibleModule(
argument_spec=dict(
server = dict(default = 'localhost'),
- port = dict(default = 1883),
+ port = dict(default = 1883, type='int'),
topic = dict(required = True),
payload = dict(required = True),
client_id = dict(default = None),
qos = dict(default="0", choices=["0", "1", "2"]),
retain = dict(default=False, type='bool'),
username = dict(default = None),
- password = dict(default = None),
+ password = dict(default = None, no_log=True),
+ ca_certs = dict(default = None, type='path'),
+ certfile = dict(default = None, type='path'),
+ keyfile = dict(default = None, type='path'),
),
supports_check_mode=True
)
@@ -137,6 +174,9 @@ def main():
retain = module.params.get("retain")
username = module.params.get("username", None)
password = module.params.get("password", None)
+ ca_certs = module.params.get("ca_certs", None)
+ certfile = module.params.get("certfile", None)
+ keyfile = module.params.get("keyfile", None)
if client_id is None:
client_id = "%s_%s" % (socket.getfqdn(), os.getpid())
@@ -148,6 +188,11 @@ def main():
if username is not None:
auth = { 'username' : username, 'password' : password }
+ tls=None
+ if ca_certs is not None:
+ tls = {'ca_certs': ca_certs, 'certfile': certfile,
+ 'keyfile': keyfile}
+
try:
rc = mqtt.single(topic, payload,
qos=qos,
@@ -155,12 +200,17 @@ def main():
client_id=client_id,
hostname=server,
port=port,
- auth=auth)
- except Exception, e:
+ auth=auth,
+ tls=tls)
+ except Exception:
+ e = get_exception()
module.fail_json(msg="unable to publish to MQTT broker %s" % (e))
module.exit_json(changed=False, topic=topic)
# import module snippets
from ansible.module_utils.basic import *
-main()
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/notification/nexmo.py b/notification/nexmo.py
index 89a246c0d90..9fafcc03769 100644
--- a/notification/nexmo.py
+++ b/notification/nexmo.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
module: nexmo
short_description: Send a SMS via nexmo
@@ -138,4 +142,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/notification/osx_say.py b/notification/osx_say.py
index 7c0ba844583..ff6d3ae0147 100644
--- a/notification/osx_say.py
+++ b/notification/osx_say.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: osx_say
@@ -43,7 +47,10 @@
'''
EXAMPLES = '''
-- local_action: osx_say msg="{{inventory_hostname}} is all done" voice=Zarvox
+- osx_say:
+ msg: '{{ inventory_hostname }} is all done'
+ voice: Zarvox
+ delegate_to: localhost
'''
DEFAULT_VOICE='Trinoids'
@@ -73,4 +80,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/notification/pushbullet.py b/notification/pushbullet.py
index dfd89af577d..ed09be8f516 100644
--- a/notification/pushbullet.py
+++ b/notification/pushbullet.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
author: "Willy Barro (@willybarro)"
@@ -108,7 +112,7 @@
def main():
module = AnsibleModule(
argument_spec = dict(
- api_key = dict(type='str', required=True),
+ api_key = dict(type='str', required=True, no_log=True),
channel = dict(type='str', default=None),
device = dict(type='str', default=None),
push_type = dict(type='str', default="note", choices=['note', 'link']),
@@ -184,4 +188,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/notification/pushover.py b/notification/pushover.py
index 0c1d6e94ab9..294da075cec 100644
--- a/notification/pushover.py
+++ b/notification/pushover.py
@@ -20,11 +20,15 @@
###
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: pushover
version_added: "2.0"
-short_description: Send notifications via u(https://pushover.net)
+short_description: Send notifications via U(https://pushover.net)
description:
- Send notifications via pushover, to subscriber list of devices, and email
addresses. Requires pushover app on devices.
@@ -34,26 +38,30 @@
options:
msg:
description:
- What message you wish to send.
+ - What message you wish to send.
required: true
app_token:
description:
- Pushover issued token identifying your pushover app.
+ - Pushover issued token identifying your pushover app.
required: true
user_key:
description:
- Pushover issued authentication key for your user.
+ - Pushover issued authentication key for your user.
required: true
pri:
- description: Message priority (see u(https://pushover.net) for details.)
+ description:
+ - Message priority (see U(https://pushover.net) for details.)
required: false
author: "Jim Richardson (@weaselkeeper)"
'''
EXAMPLES = '''
-- local_action: pushover msg="{{inventory_hostname}} has exploded in flames,
- It is now time to panic" app_token=wxfdksl user_key=baa5fe97f2c5ab3ca8f0bb59
+- pushover:
+ msg: '{{ inventory_hostname }} has exploded in flames, It is now time to panic'
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ delegate_to: localhost
'''
import urllib
@@ -94,19 +102,19 @@ def main():
module = AnsibleModule(
argument_spec=dict(
msg=dict(required=True),
- app_token=dict(required=True),
- user_key=dict(required=True),
- pri=dict(required=False, default=0),
+ app_token=dict(required=True, no_log=True),
+ user_key=dict(required=True, no_log=True),
+ pri=dict(required=False, default='0', choices=['-2','-1','0','1','2']),
),
)
msg_object = Pushover(module, module.params['user_key'], module.params['app_token'])
try:
- msg_object.run(module.params['pri'], module.params['msg'])
+ response = msg_object.run(module.params['pri'], module.params['msg'])
except:
module.fail_json(msg='Unable to send msg via pushover')
- module.exit_json(msg=msg, changed=False)
+ module.exit_json(msg='message sent successfully: %s' % response, changed=False)
# import module snippets
from ansible.module_utils.basic import *
diff --git a/notification/rocketchat.py b/notification/rocketchat.py
new file mode 100644
index 00000000000..f7089f7984f
--- /dev/null
+++ b/notification/rocketchat.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Deepak Kothandan
+# (c) 2015, Stefan Berggren
+# (c) 2014, Ramon de la Fuente
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+module: rocketchat
+short_description: Send notifications to Rocket Chat
+description:
+ - The M(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration
+version_added: "2.2"
+author: "Ramon de la Fuente (@ramondelafuente)"
+options:
+ domain:
+ description:
+      - The domain for your environment without protocol (e.g.
+        C(subdomain.domain.com) or C(chat.domain.tld)).
+ required: true
+ token:
+ description:
+ - Rocket Chat Incoming Webhook integration token. This provides
+ authentication to Rocket Chat's Incoming webhook for posting
+ messages.
+ required: true
+ protocol:
+ description:
+      - Specify the protocol for the webhook URL used to send notification messages (i.e. http or https).
+ required: false
+ default: https
+ choices:
+ - 'http'
+ - 'https'
+ msg:
+ description:
+ - Message to be sent.
+ required: false
+ default: None
+ channel:
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(token)
+        specified during the creation of the webhook.
+ required: false
+ default: None
+ username:
+ description:
+ - This is the sender of the message.
+ required: false
+ default: "Ansible"
+ icon_url:
+ description:
+ - URL for the message sender's icon.
+ required: false
+ default: "https://www.ansible.com/favicon.ico"
+ icon_emoji:
+ description:
+      - Emoji for the message sender. Available emoji representations can be
+        obtained from Rocket Chat (for example :thumbsup:). If I(icon_emoji) is set, I(icon_url) will not be used.
+ required: false
+ default: None
+ link_names:
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ required: false
+ default: 1
+ choices:
+ - 1
+ - 0
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices:
+ - 'yes'
+ - 'no'
+ color:
+ description:
+ - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message
+ required: false
+ default: 'normal'
+ choices:
+ - 'normal'
+ - 'good'
+ - 'warning'
+ - 'danger'
+ attachments:
+ description:
+ - Define a list of attachments.
+ required: false
+ default: None
+"""
+
+EXAMPLES = """
+- name: Send notification message via Rocket Chat
+ local_action:
+ module: rocketchat
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: "{{ inventory_hostname }} completed"
+
+- name: Send notification message via Rocket Chat all options
+ local_action:
+ module: rocketchat
+ domain: chat.example.com
+ token: thetoken/generatedby/rocketchat
+ msg: "{{ inventory_hostname }} completed"
+ channel: "#ansible"
+ username: "Ansible on {{ inventory_hostname }}"
+ icon_url: "http://www.example.com/some-image-file.png"
+ link_names: 0
+
+- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat
+ rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: "{{ inventory_hostname }} is alive!"
+ color: good
+ username: ""
+ icon_url: ""
+
+- name: Use the attachments API
+ rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ attachments:
+ - text: "Display my system load on host A and B"
+ color: "#ff00dd"
+ title: "System load"
+ fields:
+ - title: "System A"
+ value: "load average: 0,74, 0,66, 0,63"
+ short: "true"
+ - title: "System B"
+ value: "load average: 5,16, 4,64, 2,43"
+ short: "true"
+
+"""
+
+RETURN = """
+changed:
+ description: A flag indicating if any change was made or not.
+ returned: success
+ type: boolean
+ sample: false
+"""
+
+ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s'
+
+def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments):
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=text)
+ elif text is not None:
+ payload = dict(attachments=[dict(text=text, color=color)])
+ if channel is not None:
+ if (channel[0] == '#') or (channel[0] == '@'):
+ payload['channel'] = channel
+ else:
+ payload['channel'] = '#' + channel
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+
+    if attachments is not None:
+        if 'attachments' not in payload:
+            payload['attachments'] = []
+
+        for attachment in attachments:
+            if 'fallback' not in attachment:
+                attachment['fallback'] = attachment['text']
+            payload['attachments'].append(attachment)
+
+ payload="payload=" + module.jsonify(payload)
+ return payload
+
+def do_notify_rocketchat(module, domain, token, protocol, payload):
+
+ if token.count('/') < 1:
+ module.fail_json(msg="Invalid Token specified, provide a valid token")
+
+ rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token)
+
+ response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+            domain = dict(type='str', required=True),
+ token = dict(type='str', required=True, no_log=True),
+ protocol = dict(type='str', default='https', choices=['http', 'https']),
+ msg = dict(type='str', required=False, default=None),
+ channel = dict(type='str', default=None),
+ username = dict(type='str', default='Ansible'),
+ icon_url = dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ icon_emoji = dict(type='str', default=None),
+ link_names = dict(type='int', default=1, choices=[0,1]),
+ validate_certs = dict(default='yes', type='bool'),
+ color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
+ attachments = dict(type='list', required=False, default=None)
+ )
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ protocol = module.params['protocol']
+ text = module.params['msg']
+ channel = module.params['channel']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ color = module.params['color']
+ attachments = module.params['attachments']
+
+ payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments)
+ do_notify_rocketchat(module, domain, token, protocol, payload)
+
+ module.exit_json(msg="OK")
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+if __name__ == '__main__':
+ main()
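
build_payload_for_rocketchat silently prefixes bare channel names with '#', while '@user' targets pass through untouched. The rule in isolation (function name and test values are invented for illustration):

def normalize_channel(channel):
    # Rocket Chat accepts '#channel' and '@user'; bare names become channels.
    if channel[0] in ('#', '@'):
        return channel
    return '#' + channel

assert normalize_channel('ansible') == '#ansible'
assert normalize_channel('@deepak') == '@deepak'
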
diff --git a/notification/sendgrid.py b/notification/sendgrid.py
index 2655b4248bb..b0821983dc7 100644
--- a/notification/sendgrid.py
+++ b/notification/sendgrid.py
@@ -18,41 +18,96 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
version_added: "2.0"
module: sendgrid
short_description: Sends an email with the SendGrid API
description:
- - Sends an email with a SendGrid account through their API, not through
- the SMTP service.
+ - "Sends an email with a SendGrid account through their API, not through
+ the SMTP service."
notes:
- - This module is non-idempotent because it sends an email through the
- external API. It is idempotent only in the case that the module fails.
- - Like the other notification modules, this one requires an external
+ - "This module is non-idempotent because it sends an email through the
+ external API. It is idempotent only in the case that the module fails."
+ - "Like the other notification modules, this one requires an external
dependency to work. In this case, you'll need an active SendGrid
- account.
+ account."
+ - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers
+ you must pip install sendgrid"
+ - "since 2.2 username and password are not required if you supply an api_key"
+requirements:
+ - sendgrid python library
options:
username:
description:
- username for logging into the SendGrid account
- required: true
+ - username for logging into the SendGrid account.
+ - Since 2.2 it is only required if api_key is not supplied.
+ required: false
+ default: null
password:
- description: password that corresponds to the username
- required: true
+ description:
+ - password that corresponds to the username
+ - Since 2.2 it is only required if api_key is not supplied.
+ required: false
+ default: null
from_address:
description:
- the address in the "from" field for the email
+ - the address in the "from" field for the email
required: true
to_addresses:
description:
- a list with one or more recipient email addresses
+ - a list with one or more recipient email addresses
required: true
subject:
description:
- the desired subject for the email
+ - the desired subject for the email
required: true
-
+ api_key:
+ description:
+ - sendgrid API key to use instead of username/password
+ version_added: 2.2
+ required: false
+ default: null
+ cc:
+ description:
+ - a list of email addresses to cc
+ version_added: 2.2
+ required: false
+ default: null
+ bcc:
+ description:
+ - a list of email addresses to bcc
+ version_added: 2.2
+ required: false
+ default: null
+ attachments:
+ description:
+ - a list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs)
+ version_added: 2.2
+ required: false
+ default: null
+ from_name:
+ description:
+      - the name you want to appear in the from field, e.g. 'John Doe'
+ version_added: 2.2
+ required: false
+ default: null
+ html_body:
+ description:
+ - whether the body is html content that should be rendered
+ version_added: 2.2
+ required: false
+ default: false
+ headers:
+ description:
+ - a dict to pass on as headers
+ version_added: 2.2
+ required: false
+ default: null
author: "Matt Makai (@makaimc)"
'''
@@ -86,26 +141,72 @@
#
import urllib
+try:
+ import sendgrid
+ HAS_SENDGRID = True
+except ImportError:
+ HAS_SENDGRID = False
+
def post_sendgrid_api(module, username, password, from_address, to_addresses,
- subject, body):
- SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json"
- AGENT = "Ansible"
- data = {'api_user': username, 'api_key':password,
- 'from':from_address, 'subject': subject, 'text': body}
- encoded_data = urllib.urlencode(data)
- to_addresses_api = ''
- for recipient in to_addresses:
- if isinstance(recipient, unicode):
- recipient = recipient.encode('utf-8')
- to_addresses_api += '&to[]=%s' % recipient
- encoded_data += to_addresses_api
-
- headers = { 'User-Agent': AGENT,
- 'Content-type': 'application/x-www-form-urlencoded',
- 'Accept': 'application/json'}
- return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST')
+ subject, body, api_key=None, cc=None, bcc=None, attachments=None,
+ html_body=False, from_name=None, headers=None):
+
+ if not HAS_SENDGRID:
+ SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json"
+ AGENT = "Ansible"
+ data = {'api_user': username, 'api_key':password,
+ 'from':from_address, 'subject': subject, 'text': body}
+ encoded_data = urllib.urlencode(data)
+ to_addresses_api = ''
+ for recipient in to_addresses:
+ if isinstance(recipient, unicode):
+ recipient = recipient.encode('utf-8')
+ to_addresses_api += '&to[]=%s' % recipient
+ encoded_data += to_addresses_api
+
+ headers = { 'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json'}
+ return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST')
+ else:
+
+ if api_key:
+ sg = sendgrid.SendGridClient(api_key)
+ else:
+ sg = sendgrid.SendGridClient(username, password)
+
+ message = sendgrid.Mail()
+ message.set_subject(subject)
+
+ for recip in to_addresses:
+ message.add_to(recip)
+ if cc:
+ for recip in cc:
+ message.add_cc(recip)
+ if bcc:
+ for recip in bcc:
+ message.add_bcc(recip)
+ if headers:
+ message.set_headers(headers)
+
+ if attachments:
+ for f in attachments:
+ name = os.path.basename(f)
+ message.add_attachment(name, f)
+
+ if from_name:
+            message.set_from('%s <%s>' % (from_name, from_address))
+ else:
+ message.set_from(from_address)
+
+ if html_body:
+ message.set_html(body)
+ else:
+ message.set_text(body)
+
+ return sg.send(message)
# =======================================
# Main
#
@@ -113,28 +214,57 @@ def post_sendgrid_api(module, username, password, from_address, to_addresses,
def main():
module = AnsibleModule(
argument_spec=dict(
- username=dict(required=True),
- password=dict(required=True, no_log=True),
+ username=dict(required=False),
+ password=dict(required=False, no_log=True),
+ api_key=dict(required=False, no_log=True),
+ bcc=dict(required=False, type='list'),
+ cc=dict(required=False, type='list'),
+ headers=dict(required=False, type='dict'),
from_address=dict(required=True),
+ from_name=dict(required=False),
to_addresses=dict(required=True, type='list'),
subject=dict(required=True),
body=dict(required=True),
+ html_body=dict(required=False, default=False, type='bool'),
+ attachments=dict(required=False, type='list')
),
- supports_check_mode=True
+ supports_check_mode=True,
+ mutually_exclusive = [
+ ['api_key', 'password'],
+ ['api_key', 'username']
+ ],
+ required_together = [['username', 'password']],
)
username = module.params['username']
password = module.params['password']
+ api_key = module.params['api_key']
+ bcc = module.params['bcc']
+ cc = module.params['cc']
+ headers = module.params['headers']
+ from_name = module.params['from_name']
from_address = module.params['from_address']
to_addresses = module.params['to_addresses']
subject = module.params['subject']
body = module.params['body']
+ html_body = module.params['html_body']
+ attachments = module.params['attachments']
+
+ sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments]
+
+    if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID:
+ module.fail_json(msg='You must install the sendgrid python library if you want to use any of the following arguments: api_key, bcc, cc, headers, from_name, html_body, attachments')
response, info = post_sendgrid_api(module, username, password,
- from_address, to_addresses, subject, body)
- if info['status'] != 200:
- module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg'])
+ from_address, to_addresses, subject, body, attachments=attachments,
+ bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key)
+ if not HAS_SENDGRID:
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg'])
+ else:
+ if response != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message'])
module.exit_json(msg=subject, changed=False)
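
The sendgrid rewrite keeps two code paths alive: a raw HTTP fallback when the library is missing, and the richer client when it is importable. The optional-dependency pattern it relies on, reduced to a skeleton (somelib and the helper are placeholders, not from the patch):

try:
    import somelib  # hypothetical optional dependency
    HAS_SOMELIB = True
except ImportError:
    HAS_SOMELIB = False

def lib_only_options_used(params, lib_only_options):
    # Return the library-only options the caller set; the module should
    # fail_json if any are present while HAS_SOMELIB is False.
    return [name for name in lib_only_options if params.get(name) is not None]
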
diff --git a/notification/slack.py b/notification/slack.py
index ba4ed2e4c2d..3d50e89df95 100644
--- a/notification/slack.py
+++ b/notification/slack.py
@@ -1,6 +1,8 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+# (c) 2016, René Moser
+# (c) 2015, Stefan Berggren
# (c) 2014, Ramon de la Fuente
#
# This file is part of Ansible
@@ -18,12 +20,16 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
module: slack
short_description: Send Slack notifications
description:
- The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration
-version_added: 1.6
+version_added: "1.6"
author: "Ramon de la Fuente (@ramondelafuente)"
options:
domain:
@@ -32,6 +38,7 @@
C(future500.slack.com)) In 1.8 and beyond, this is deprecated and may
be ignored. See token documentation for information.
required: false
+ default: None
token:
description:
- Slack integration token. This authenticates you to the slack service.
@@ -46,25 +53,28 @@
msg:
description:
- Message to send.
- required: true
+ required: false
+ default: None
channel:
description:
- Channel to send the message to. If absent, the message goes to the channel selected for the I(token).
required: false
+ default: None
username:
description:
- This is the sender of the message.
required: false
- default: ansible
+ default: "Ansible"
icon_url:
description:
- - Url for the message sender's icon (default C(http://www.ansible.com/favicon.ico))
+ - Url for the message sender's icon (default C(https://www.ansible.com/favicon.ico))
required: false
icon_emoji:
description:
- Emoji for the message sender. See Slack documentation for options.
(if I(icon_emoji) is set, I(icon_url) will not be used)
required: false
+ default: None
link_names:
description:
- Automatically create links for channels and usernames in I(msg).
@@ -77,6 +87,7 @@
description:
- Setting for the message parser at Slack
required: false
+ default: None
choices:
- 'full'
- 'none'
@@ -90,7 +101,7 @@
- 'yes'
- 'no'
color:
- version_added: 2.0
+ version_added: "2.0"
description:
- Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message
required: false
@@ -100,21 +111,24 @@
- 'good'
- 'warning'
- 'danger'
+ attachments:
+ description:
+ - Define a list of attachments. This list mirrors the Slack JSON API. For more information, see https://api.slack.com/docs/attachments
+ required: false
+ default: None
"""
EXAMPLES = """
- name: Send notification message via Slack
local_action:
module: slack
- domain: future500.slack.com
- token: thetokengeneratedbyslack
+ token: thetoken/generatedby/slack
msg: "{{ inventory_hostname }} completed"
- name: Send notification message via Slack all options
local_action:
module: slack
- domain: future500.slack.com
- token: thetokengeneratedbyslack
+ token: thetoken/generatedby/slack
msg: "{{ inventory_hostname }} completed"
channel: "#ansible"
username: "Ansible on {{ inventory_hostname }}"
@@ -124,22 +138,60 @@
- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack
slack:
- domain: future500.slack.com
- token: thetokengeneratedbyslack
+ token: thetoken/generatedby/slack
msg: "{{ inventory_hostname }} is alive!"
color: good
username: ""
icon_url: ""
+
+- name: Use the attachments API
+ slack:
+ token: thetoken/generatedby/slack
+ attachments:
+ - text: "Display my system load on host A and B"
+ color: "#ff00dd"
+ title: "System load"
+ fields:
+ - title: "System A"
+ value: "load average: 0,74, 0,66, 0,63"
+ short: "true"
+ - title: "System B"
+ value: "load average: 5,16, 4,64, 2,43"
+ short: "true"
+
+- name: Send notification message via Slack (deprecated API using domain)
+ local_action:
+ module: slack
+ domain: future500.slack.com
+ token: thetokengeneratedbyslack
+ msg: "{{ inventory_hostname }} completed"
+
"""
OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
-def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color):
- if color == 'normal':
- payload = dict(text=text)
- else:
- payload = dict(attachments=[dict(text=text, color=color)])
+# See https://api.slack.com/docs/message-formatting#how_to_escape_characters
+# Escaping quotes and apostrophe however is related to how Ansible handles them.
+html_escape_table = {
+ '&': "&",
+ '>': ">",
+ '<': "<",
+ '"': "\"",
+ "'": "\'",
+}
+
+def html_escape(text):
+ '''Produce entities within text.'''
+ return "".join(html_escape_table.get(c,c) for c in text)
+
+def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color, attachments):
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=html_escape(text))
+ elif text is not None:
+        # With a custom color we have to set the message as an attachment, and explicitly turn markdown parsing on for it.
+ payload = dict(attachments=[dict(text=html_escape(text), color=color, mrkdwn_in=["text"])])
if channel is not None:
if (channel[0] == '#') or (channel[0] == '@'):
payload['channel'] = channel
@@ -156,7 +208,29 @@ def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoj
if parse is not None:
payload['parse'] = parse
- payload="payload=" + module.jsonify(payload)
+    if attachments is not None:
+        if 'attachments' not in payload:
+            payload['attachments'] = []
+
+        keys_to_escape = [
+            'title',
+            'text',
+            'author_name',
+            'pretext',
+            'fallback',
+        ]
+        for attachment in attachments:
+            for key in keys_to_escape:
+                if key in attachment:
+                    attachment[key] = html_escape(attachment[key])
+
+            if 'fallback' not in attachment:
+                attachment['fallback'] = attachment['text']
+
+            payload['attachments'].append(attachment)
+
+ payload=module.jsonify(payload)
return payload
def do_notify_slack(module, domain, token, payload):
@@ -168,7 +242,12 @@ def do_notify_slack(module, domain, token, payload):
module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form XXXX/YYYY/ZZZZ in your playbook")
slack_incoming_webhook = OLD_SLACK_INCOMING_WEBHOOK % (domain, token)
- response, info = fetch_url(module, slack_incoming_webhook, data=payload)
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ }
+ response, info = fetch_url(module=module, url=slack_incoming_webhook, headers=headers, method='POST', data=payload)
+
if info['status'] != 200:
obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]')
module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg']))
@@ -178,15 +257,16 @@ def main():
argument_spec = dict(
domain = dict(type='str', required=False, default=None),
token = dict(type='str', required=True, no_log=True),
- msg = dict(type='str', required=True),
+ msg = dict(type='str', required=False, default=None),
channel = dict(type='str', default=None),
username = dict(type='str', default='Ansible'),
- icon_url = dict(type='str', default='http://www.ansible.com/favicon.ico'),
+ icon_url = dict(type='str', default='https://www.ansible.com/favicon.ico'),
icon_emoji = dict(type='str', default=None),
link_names = dict(type='int', default=1, choices=[0,1]),
parse = dict(type='str', default=None, choices=['none', 'full']),
validate_certs = dict(default='yes', type='bool'),
- color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger'])
+ color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
+ attachments = dict(type='list', required=False, default=None)
)
)
@@ -200,8 +280,9 @@ def main():
link_names = module.params['link_names']
parse = module.params['parse']
color = module.params['color']
+ attachments = module.params['attachments']
- payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color)
+ payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color, attachments)
do_notify_slack(module, domain, token, payload)
module.exit_json(msg="OK")
@@ -209,4 +290,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
-main()
+
+if __name__ == '__main__':
+ main()
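
Per the linked Slack formatting documentation, only &, < and > must be escaped in message text; the table above additionally rewrites quotes because of how Ansible passes strings through. A reduced, self-checking version of the core rule:

SLACK_ESCAPES = {'&': '&amp;', '<': '&lt;', '>': '&gt;'}

def slack_escape(text):
    # Escape the three characters Slack's message format reserves.
    return ''.join(SLACK_ESCAPES.get(c, c) for c in text)

assert slack_escape('a < b & c') == 'a &lt; b &amp; c'
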
diff --git a/notification/sns.py b/notification/sns.py
index 5fd81e2047f..8e5a07dad63 100644
--- a/notification/sns.py
+++ b/notification/sns.py
@@ -18,13 +18,17 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
module: sns
short_description: Send Amazon Simple Notification Service (SNS) messages
description:
- The M(sns) module sends notifications to a topic on your Amazon SNS account
version_added: 1.6
-author: "Michael J. Schultz (@mjschultz)"
+author: "Michael J. Schultz (@mjschultz)"
options:
msg:
description:
@@ -61,7 +65,7 @@
required: false
aws_secret_key:
description:
- - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
+ - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_secret_key', 'secret_key']
@@ -77,8 +81,8 @@
required: false
aliases: ['aws_region', 'ec2_region']
-requirements: [ "boto" ]
-author: Michael J. Schultz
+requirements:
+ - "boto"
"""
EXAMPLES = """
@@ -98,10 +102,14 @@
topic: "deploy"
"""
-import sys
+try:
+ import json
+except ImportError:
+ import simplejson as json
-from ansible.module_utils.basic import *
-from ansible.module_utils.ec2 import *
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import ec2_argument_spec, connect_to_aws, get_aws_connection_info
+from ansible.module_utils.pycompat24 import get_exception
try:
import boto
@@ -157,7 +165,8 @@ def main():
module.fail_json(msg="region must be specified")
try:
connection = connect_to_aws(boto.sns, region, **aws_connect_params)
- except boto.exception.NoAuthHandlerFound, e:
+ except boto.exception.NoAuthHandlerFound:
+ e = get_exception()
module.fail_json(msg=str(e))
# .publish() takes full ARN topic id, but I'm lazy and type shortnames
@@ -186,9 +195,11 @@ def main():
try:
connection.publish(topic=arn_topic, subject=subject,
message_structure='json', message=json_msg)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError:
+ e = get_exception()
module.fail_json(msg=str(e))
module.exit_json(msg="OK")
-main()
+if __name__ == '__main__':
+ main()
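
The `except SomeError, e` form is Python-2-only syntax; the patch replaces it with get_exception() from module_utils, which retrieves the active exception on either interpreter. A standalone illustration, using sys.exc_info() as a minimal stand-in for that helper:

import sys

def get_exception():
    # Minimal stand-in for ansible.module_utils.pycompat24.get_exception.
    return sys.exc_info()[1]

try:
    int('not-a-number')
except ValueError:
    e = get_exception()
    print('failed: %s' % e)
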
diff --git a/notification/telegram.py b/notification/telegram.py
new file mode 100644
index 00000000000..57746cf06ae
--- /dev/null
+++ b/notification/telegram.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Artem Feofanov
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+
+module: telegram
+version_added: "2.2"
+author: "Artem Feofanov (@tyouxa)"
+
+short_description: Send notifications via Telegram
+
+description:
+    - Send notifications via a Telegram bot, to a verified group or user.
+notes:
+    - You will need a Telegram account and a Telegram bot to use this module.
+options:
+ msg:
+ description:
+ - What message you wish to send.
+ required: true
+ token:
+ description:
+ - Token identifying your telegram bot.
+ required: true
+ chat_id:
+ description:
+ - Telegram group or user chat_id
+ required: true
+
+"""
+
+EXAMPLES = """
+
+# Send a message to a chat from a playbook
+- telegram:
+ token: 'bot9999999:XXXXXXXXXXXXXXXXXXXXXXX'
+ chat_id: 000000
+ msg: Ansible task finished
+"""
+
+RETURN = """
+
+msg:
+ description: The message you attempted to send
+ returned: success
+ type: string
+ sample: "Ansible task finished"
+"""
+
+import urllib
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ token = dict(type='str',required=True,no_log=True),
+ chat_id = dict(type='str',required=True,no_log=True),
+ msg = dict(type='str',required=True)),
+ supports_check_mode=True
+ )
+
+ token = urllib.quote(module.params.get('token'))
+ chat_id = urllib.quote(module.params.get('chat_id'))
+ msg = urllib.quote(module.params.get('msg'))
+
+ url = 'https://api.telegram.org/' + token + '/sendMessage?text=' + msg + '&chat_id=' + chat_id
+
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url)
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+if __name__ == '__main__':
+ main()
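
The telegram module builds a GET request by URL-quoting each parameter before splicing it into the Bot API path. The same construction outside the module (token and chat id are placeholders; quote lives in urllib on Python 2 and urllib.parse on Python 3):

try:
    from urllib import quote            # Python 2
except ImportError:
    from urllib.parse import quote      # Python 3

token = 'bot9999999:XXXXXXXXXXXXXXXXXXXXXXX'  # placeholder bot token
chat_id = quote('000000')
msg = quote('Ansible task finished')
url = 'https://api.telegram.org/%s/sendMessage?text=%s&chat_id=%s' % (
    quote(token), msg, chat_id)
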
diff --git a/notification/twilio.py b/notification/twilio.py
index 9ed1a09e12e..1d7e059e5c8 100644
--- a/notification/twilio.py
+++ b/notification/twilio.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
version_added: "1.6"
@@ -139,7 +143,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
account_sid=dict(required=True),
- auth_token=dict(required=True),
+ auth_token=dict(required=True, no_log=True),
msg=dict(required=True),
from_number=dict(required=True),
to_number=dict(required=True),
@@ -161,8 +165,12 @@ def main():
for number in to_number:
r, info = post_twilio_api(module, account_sid, auth_token, msg,
from_number, number, media_url)
- if info['status'] != 200:
- module.fail_json(msg="unable to send message to %s" % number)
+ if info['status'] not in [200, 201]:
+ body_message = "unknown error"
+ if 'body' in info:
+ body = json.loads(info['body'])
+ body_message = body['message']
+ module.fail_json(msg="unable to send message to %s: %s" % (number, body_message))
module.exit_json(msg=msg, changed=False)
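
The improved failure path digs Twilio's human-readable error out of the JSON response body instead of reporting only the HTTP status. The extraction logic, hardened a little against malformed bodies (the sample response is invented):

import json

def error_detail(info):
    # info mirrors the dict fetch_url returns; fall back when the body
    # is absent or not the expected JSON shape.
    try:
        return json.loads(info['body'])['message']
    except (KeyError, ValueError, TypeError):
        return 'unknown error'

print(error_detail({'status': 400, 'body': '{"message": "invalid number"}'}))
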
diff --git a/notification/typetalk.py b/notification/typetalk.py
index 8a2dad3d6a2..f638be09ab2 100644
--- a/notification/typetalk.py
+++ b/notification/typetalk.py
@@ -15,6 +15,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: typetalk
@@ -44,7 +48,11 @@
'''
EXAMPLES = '''
-- typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed"
+- typetalk:
+ client_id: 12345
+ client_secret: 12345
+ topic: 1
+ msg: install completed
'''
import urllib
@@ -57,6 +65,11 @@
except ImportError:
json = None
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import fetch_url, ConnectionError
+
def do_request(module, url, params, headers=None):
data = urllib.urlencode(params)
@@ -72,6 +85,7 @@ def do_request(module, url, params, headers=None):
raise exc
return r
+
def get_access_token(module, client_id, client_secret):
params = {
'client_id': client_id,
@@ -95,7 +109,8 @@ def send_message(module, client_id, client_secret, topic, msg):
}
do_request(module, url, {'message': msg}, headers)
return True, {'access_token': access_token}
- except ConnectionError, e:
+ except ConnectionError:
+ e = get_exception()
return False, e
@@ -104,7 +119,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
client_id=dict(required=True),
- client_secret=dict(required=True),
+ client_secret=dict(required=True, no_log=True),
topic=dict(required=True, type='int'),
msg=dict(required=True),
),
@@ -126,8 +141,5 @@ def main():
module.exit_json(changed=True, topic=topic, msg=msg)
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
diff --git a/packaging/dpkg_selections.py b/packaging/dpkg_selections.py
index f09ff9a9f00..f26ad68f02d 100644
--- a/packaging/dpkg_selections.py
+++ b/packaging/dpkg_selections.py
@@ -1,4 +1,24 @@
#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -23,7 +43,9 @@
'''
EXAMPLES = '''
# Prevent python from being upgraded.
-- dpkg_selections: name=python selection=hold
+- dpkg_selections:
+ name: python
+ selection: hold
'''
def main():
@@ -57,4 +79,6 @@ def main():
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py
index 7b092a13667..8a165189625 100644
--- a/packaging/elasticsearch_plugin.py
+++ b/packaging/elasticsearch_plugin.py
@@ -22,6 +22,10 @@
along with Ansible. If not, see .
"""
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: elasticsearch_plugin
@@ -33,17 +37,17 @@
options:
name:
description:
- - Name of the plugin to install
+      - Name of the plugin to install. In ES 2.x, the name can be a URL or file location.
required: True
state:
description:
- Desired state of a plugin.
required: False
- choices: [present, absent]
+ choices: ["present", "absent"]
default: present
url:
description:
- - Set exact URL to download the plugin from
+ - Set exact URL to download the plugin from (Only works for ES 1.x)
required: False
default: None
timeout:
@@ -61,6 +65,18 @@
- Your configured plugin directory specified in Elasticsearch
required: False
default: /usr/share/elasticsearch/plugins/
+ proxy_host:
+ description:
+ - Proxy host to use during plugin installation
+ required: False
+ default: None
+ version_added: "2.1"
+ proxy_port:
+ description:
+ - Proxy port to use during plugin installation
+ required: False
+ default: None
+ version_added: "2.1"
version:
description:
- Version of the plugin to be installed.
@@ -71,15 +87,26 @@
EXAMPLES = '''
# Install Elasticsearch head plugin
-- elasticsearch_plugin: state=present name="mobz/elasticsearch-head"
+- elasticsearch_plugin:
+ state: present
+ name: mobz/elasticsearch-head
# Install specific version of a plugin
-- elasticsearch_plugin: state=present name="com.github.kzwang/elasticsearch-image" version="1.2.0"
+- elasticsearch_plugin:
+ state: present
+ name: com.github.kzwang/elasticsearch-image
+ version: '1.2.0'
# Uninstall Elasticsearch head plugin
-- elasticsearch_plugin: state=absent name="mobz/elasticsearch-head"
+- elasticsearch_plugin:
+ state: absent
+ name: mobz/elasticsearch-head
'''
+PACKAGE_STATE_MAP = dict(
+ present="install",
+ absent="remove"
+)
def parse_plugin_repo(string):
elements = string.split("/")
@@ -99,70 +126,101 @@ def parse_plugin_repo(string):
return repo
-
def is_plugin_present(plugin_dir, working_dir):
return os.path.isdir(os.path.join(working_dir, plugin_dir))
-
def parse_error(string):
reason = "reason: "
- return string[string.index(reason) + len(reason):].strip()
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+def install_plugin(module, plugin_bin, plugin_name, version, url, proxy_host, proxy_port, timeout):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name]
-def main():
+ if version:
+ plugin_name = plugin_name + '/' + version
- package_state_map = dict(
- present="--install",
- absent="--remove"
- )
+ if proxy_host and proxy_port:
+ cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
+
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
- state=dict(default="present", choices=package_state_map.keys()),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
url=dict(default=None),
timeout=dict(default="1m"),
- plugin_bin=dict(default="/usr/share/elasticsearch/bin/plugin"),
- plugin_dir=dict(default="/usr/share/elasticsearch/plugins/"),
+ plugin_bin=dict(default="/usr/share/elasticsearch/bin/plugin", type="path"),
+ plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
+ proxy_host=dict(default=None),
+ proxy_port=dict(default=None),
version=dict(default=None)
- )
+ ),
+ supports_check_mode=True
)
- plugin_bin = module.params["plugin_bin"]
- plugin_dir = module.params["plugin_dir"]
- name = module.params["name"]
- state = module.params["state"]
- url = module.params["url"]
- timeout = module.params["timeout"]
- version = module.params["version"]
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ timeout = module.params["timeout"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ proxy_host = module.params["proxy_host"]
+ proxy_port = module.params["proxy_port"]
+ version = module.params["version"]
present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
# skip if the state is correct
if (present and state == "present") or (state == "absent" and not present):
- module.exit_json(changed=False, name=name)
-
- if (version):
- name = name + '/' + version
+ module.exit_json(changed=False, name=name, state=state)
- cmd_args = [plugin_bin, package_state_map[state], name]
+ if state == "present":
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, url, proxy_host, proxy_port, timeout)
- if url:
- cmd_args.append("--url %s" % url)
-
- if timeout:
- cmd_args.append("--timeout %s" % timeout)
-
- cmd = " ".join(cmd_args)
-
- rc, out, err = module.run_command(cmd)
-
- if rc != 0:
- reason = parse_error(out)
- module.fail_json(msg=reason)
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
- module.exit_json(changed=True, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
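
install_plugin and remove_plugin both honour check mode by substituting a successful dummy result for the real run_command call. That pattern, factored into a helper (a sketch, not code from the patch):

def run_or_simulate(check_mode, run_command, cmd):
    # In check mode, report success without executing anything.
    if check_mode:
        return 0, 'check mode', ''
    return run_command(cmd)

# inside a module:
#   rc, out, err = run_or_simulate(module.check_mode, module.run_command, cmd)
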
diff --git a/packaging/kibana_plugin.py b/packaging/kibana_plugin.py
new file mode 100644
index 00000000000..91e2f23cf57
--- /dev/null
+++ b/packaging/kibana_plugin.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to manage Kibana plugins
+(c) 2016, Thierno IB. BARRY @barryib
+Sponsored by Polyconseil http://polyconseil.fr.
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see .
+"""
+
+import os
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: kibana_plugin
+short_description: Manage Kibana plugins
+description:
+ - Manages Kibana plugins.
+version_added: "2.2"
+author: Thierno IB. BARRY (@barryib)
+options:
+ name:
+ description:
+ - Name of the plugin to install
+ required: True
+ state:
+ description:
+ - Desired state of a plugin.
+ required: False
+ choices: ["present", "absent"]
+ default: present
+ url:
+ description:
+ - Set exact URL to download the plugin from.
+ For local file, prefix its absolute path with file://
+ required: False
+ default: None
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h..."
+ required: False
+ default: 1m
+ plugin_bin:
+ description:
+ - Location of the plugin binary
+ required: False
+ default: /opt/kibana/bin/kibana
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Kibana
+ required: False
+ default: /opt/kibana/installedPlugins/
+ version:
+ description:
+      - Version of the plugin to be installed.
+        If the plugin is already installed with an older version, it will NOT be updated unless C(force) is set to yes.
+ required: False
+ default: None
+ force:
+ description:
+      - Delete and re-install the plugin. Can be useful when updating a plugin.
+ required: False
+ choices: ["yes", "no"]
+ default: no
+'''
+
+EXAMPLES = '''
+# Install Marvel plugin
+- kibana_plugin:
+    state: present
+    name: elasticsearch/marvel
+
+# Install specific version of a plugin
+- kibana_plugin:
+ state: present
+ name: elasticsearch/marvel
+ version: '2.3.3'
+
+# Uninstall Marvel plugin
+- kibana_plugin:
+ state: absent
+ name: elasticsearch/marvel
+'''
+
+RETURN = '''
+cmd:
+    description: the launched command during plugin management (install / remove)
+ returned: success
+ type: string
+name:
+ description: the plugin name to install or remove
+ returned: success
+ type: string
+url:
+    description: the URL the plugin was installed from
+ returned: success
+ type: string
+timeout:
+    description: the timeout for plugin download
+ returned: success
+ type: string
+stdout:
+ description: the command stdout
+ returned: success
+ type: string
+stderr:
+ description: the command stderr
+ returned: success
+ type: string
+state:
+ description: the state for the managed plugin
+ returned: success
+ type: string
+'''
+
+PACKAGE_STATE_MAP = dict(
+ present="--install",
+ absent="--remove"
+)
+
+def parse_plugin_repo(string):
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+ # remove elasticsearch- prefix
+ # remove es- prefix
+ for string in ("elasticsearch-", "es-"):
+ if repo.startswith(string):
+ return repo[len(string):]
+
+ return repo
+
+def is_plugin_present(plugin_dir, working_dir):
+ return os.path.isdir(os.path.join(working_dir, plugin_dir))
+
+def parse_error(string):
+ reason = "reason: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+def install_plugin(module, plugin_bin, plugin_name, url, timeout):
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
+
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ return True, cmd, "check mode", ""
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ return True, cmd, "check mode", ""
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
+ plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
+ version=dict(default=None),
+ force=dict(default="no", type="bool")
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ timeout = module.params["timeout"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ version = module.params["version"]
+ force = module.params["force"]
+
+ present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present" and not force) or (state == "absent" and not present and not force):
+ module.exit_json(changed=False, name=name, state=state)
+
+    if version:
+ name = name + '/' + version
+
+ if state == "present":
+ if force:
+ remove_plugin(module, plugin_bin, name)
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout)
+
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
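
parse_plugin_repo, shared verbatim between the elasticsearch and kibana modules, reduces 'username/pluginname' to the bare plugin name and strips the legacy prefixes. A close paraphrase with a worked check (taking the last path element is my simplification of the original's second-element rule):

def plugin_short_name(spec):
    # 'mobz/elasticsearch-head' -> 'head'; bare names pass through.
    repo = spec.split('/')[-1]
    for prefix in ('elasticsearch-', 'es-'):
        if repo.startswith(prefix):
            return repo[len(prefix):]
    return repo

assert plugin_short_name('mobz/elasticsearch-head') == 'head'
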
diff --git a/packaging/language/bower.py b/packaging/language/bower.py
index c835fbf797d..489ab3cb804 100644
--- a/packaging/language/bower.py
+++ b/packaging/language/bower.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: bower
@@ -48,6 +52,12 @@
description:
- The base path where to install the bower packages
required: true
+ relative_execpath:
+ description:
+ - Relative path to bower executable from install path
+ default: null
+ required: false
+ version_added: "2.1"
state:
description:
- The state of the bower package
@@ -61,20 +71,37 @@
'''
EXAMPLES = '''
-description: Install "bootstrap" bower package.
-- bower: name=bootstrap
-
-description: Install "bootstrap" bower package on version 3.1.1.
-- bower: name=bootstrap version=3.1.1
-
-description: Remove the "bootstrap" bower package.
-- bower: name=bootstrap state=absent
-
-description: Install packages based on bower.json.
-- bower: path=/app/location
-
-description: Update packages based on bower.json to their latest version.
-- bower: path=/app/location state=latest
+- name: Install "bootstrap" bower package.
+ bower:
+ name: bootstrap
+
+- name: Install "bootstrap" bower package on version 3.1.1.
+ bower:
+ name: bootstrap
+ version: '3.1.1'
+
+- name: Remove the "bootstrap" bower package.
+ bower:
+ name: bootstrap
+ state: absent
+
+- name: Install packages based on bower.json.
+ bower:
+ path: /app/location
+
+- name: Update packages based on bower.json to their latest version.
+ bower:
+ path: /app/location
+ state: latest
+
+# install bower locally and run from there
+- npm:
+ path: /app/location
+ name: bower
+ global: no
+- bower:
+ path: /app/location
+ relative_execpath: node_modules/.bin
'''
@@ -85,6 +112,7 @@ def __init__(self, module, **kwargs):
self.offline = kwargs['offline']
self.production = kwargs['production']
self.path = kwargs['path']
+ self.relative_execpath = kwargs['relative_execpath']
self.version = kwargs['version']
if kwargs['version']:
@@ -94,7 +122,17 @@ def __init__(self, module, **kwargs):
def _exec(self, args, run_in_check_mode=False, check_rc=True):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
- cmd = ["bower"] + args + ['--config.interactive=false', '--allow-root']
+ cmd = []
+
+ if self.relative_execpath:
+ cmd.append(os.path.join(self.path, self.relative_execpath, "bower"))
+ if not os.path.isfile(cmd[-1]):
+ self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath)
+ else:
+ cmd.append("bower")
+
+ cmd.extend(args)
+ cmd.extend(['--config.interactive=false', '--allow-root'])
if self.name:
cmd.append(self.name_version)
@@ -130,10 +168,9 @@ def list(self):
dep_data = data['dependencies'][dep]
if dep_data.get('missing', False):
missing.append(dep)
- elif \
- 'version' in dep_data['pkgMeta'] and \
- 'update' in dep_data and \
- dep_data['pkgMeta']['version'] != dep_data['update']['latest']:
+ elif ('version' in dep_data['pkgMeta'] and
+ 'update' in dep_data and
+ dep_data['pkgMeta']['version'] != dep_data['update']['latest']):
outdated.append(dep)
elif dep_data.get('incompatible', False):
outdated.append(dep)
@@ -160,7 +197,8 @@ def main():
name=dict(default=None),
offline=dict(default='no', type='bool'),
production=dict(default='no', type='bool'),
- path=dict(required=True),
+ path=dict(required=True, type='path'),
+ relative_execpath=dict(default=None, required=False, type='path'),
state=dict(default='present', choices=['present', 'absent', 'latest', ]),
version=dict(default=None),
)
@@ -172,13 +210,14 @@ def main():
offline = module.params['offline']
production = module.params['production']
path = os.path.expanduser(module.params['path'])
+ relative_execpath = module.params['relative_execpath']
state = module.params['state']
version = module.params['version']
if state == 'absent' and not name:
module.fail_json(msg='uninstalling a package is only available for named packages')
- bower = Bower(module, name=name, offline=offline, production=production, path=path, version=version)
+ bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version)
changed = False
if state == 'present':
@@ -201,4 +240,5 @@ def main():
# Import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
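
relative_execpath lets bower be resolved from a project-local node_modules/.bin instead of the PATH, failing fast when the binary is absent. The resolution step on its own (raising instead of fail_json, since no module object exists here):

import os

def bower_executable(path, relative_execpath=None):
    # Prefer a project-local bower over whatever is on the PATH.
    if relative_execpath:
        candidate = os.path.join(path, relative_execpath, 'bower')
        if not os.path.isfile(candidate):
            raise ValueError('bower not found at %s' % candidate)
        return candidate
    return 'bower'
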
diff --git a/packaging/language/bundler.py b/packaging/language/bundler.py
index f4aeff4156f..e7950b08548 100644
--- a/packaging/language/bundler.py
+++ b/packaging/language/bundler.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION='''
---
module: bundler
@@ -112,19 +116,29 @@
EXAMPLES='''
# Installs gems from a Gemfile in the current directory
-- bundler: state=present executable=~/.rvm/gems/2.1.5/bin/bundle
+- bundler:
+ state: present
+ executable: ~/.rvm/gems/2.1.5/bin/bundle
# Excludes the production group from installing
-- bundler: state=present exclude_groups=production
+- bundler:
+ state: present
+ exclude_groups: production
# Only install gems from the default and production groups
-- bundler: state=present deployment=yes
+- bundler:
+ state: present
+ deployment_mode: yes
# Installs gems using a Gemfile in another directory
-- bundler: state=present gemfile=../rails_project/Gemfile
+- bundler:
+ state: present
+ gemfile: ../rails_project/Gemfile
# Updates Gemfile in another directory
-- bundler: state=latest chdir=~/rails_project
+- bundler:
+ state: latest
+ chdir: ~/rails_project
'''
@@ -140,15 +154,15 @@ def main():
argument_spec=dict(
executable=dict(default=None, required=False),
state=dict(default='present', required=False, choices=['present', 'latest']),
- chdir=dict(default=None, required=False),
+ chdir=dict(default=None, required=False, type='path'),
exclude_groups=dict(default=None, required=False, type='list'),
clean=dict(default=False, required=False, type='bool'),
- gemfile=dict(default=None, required=False),
+ gemfile=dict(default=None, required=False, type='path'),
local=dict(default=False, required=False, type='bool'),
deployment_mode=dict(default=False, required=False, type='bool'),
user_install=dict(default=True, required=False, type='bool'),
- gem_path=dict(default=None, required=False),
- binstub_directory=dict(default=None, required=False),
+ gem_path=dict(default=None, required=False, type='path'),
+ binstub_directory=dict(default=None, required=False, type='path'),
extra_args=dict(default=None, required=False),
),
supports_check_mode=True
@@ -163,7 +177,7 @@ def main():
local = module.params.get('local')
deployment_mode = module.params.get('deployment_mode')
user_install = module.params.get('user_install')
- gem_path = module.params.get('gem_install_path')
+ gem_path = module.params.get('gem_path')
binstub_directory = module.params.get('binstub_directory')
extra_args = module.params.get('extra_args')
diff --git a/packaging/language/composer.py b/packaging/language/composer.py
index 1ef93e736fc..172acb4ad1c 100644
--- a/packaging/language/composer.py
+++ b/packaging/language/composer.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: composer
@@ -36,6 +40,12 @@
- Composer command like "install", "update" and so on
required: false
default: install
+ arguments:
+ version_added: "2.0"
+ description:
+ - Composer arguments like required package, version and so on
+ required: false
+ default: null
working_dir:
description:
- Directory of your project ( see --working-dir )
@@ -97,11 +107,26 @@
- composer installed in bin path (recommended /usr/local/bin)
notes:
- Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
+ - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid it.
'''
EXAMPLES = '''
# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock
-- composer: command=install working_dir=/path/to/project
+- composer:
+ command: install
+ working_dir: /path/to/project
+
+- composer:
+ command: require
+ arguments: my/package
+ working_dir: /path/to/project
+
+# Clone project and install with all dependencies
+- composer:
+ command: create-project
+ arguments: package/package /path/to/project ~1.0
+ working_dir: /path/to/project
+ prefer_dist: yes
'''
import os
@@ -110,7 +135,12 @@
try:
import json
except ImportError:
- import simplejson as json
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
def parse_out(string):
return re.sub("\s+", " ", string).strip()
@@ -128,16 +158,17 @@ def get_available_options(module, command='install'):
command_help_json = json.loads(out)
return command_help_json['definition']['options']
-def composer_command(module, command, options=[]):
+def composer_command(module, command, arguments="", options=[]):
php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
- cmd = "%s %s %s %s" % (php_path, composer_path, command, " ".join(options))
+ cmd = "%s %s %s %s %s" % (php_path, composer_path, command, " ".join(options), arguments)
return module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec = dict(
command = dict(default="install", type="str", required=False),
+ arguments = dict(default="", type="str", required=False),
working_dir = dict(aliases=["working-dir"], required=True),
prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]),
prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]),
@@ -152,6 +183,10 @@ def main():
# Get composer command with fallback to default
command = module.params['command']
+ if re.search(r"\s", command):
+ module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
+
+ arguments = module.params['arguments']
available_options = get_available_options(module=module, command=command)
options = []
@@ -188,15 +223,15 @@ def main():
if module.check_mode:
options.append('--dry-run')
- rc, out, err = composer_command(module, command, options)
+ rc, out, err = composer_command(module, command, arguments, options)
if rc != 0:
output = parse_out(err)
- module.fail_json(msg=output)
+ module.fail_json(msg=output, stdout=err)
else:
# Composer version > 1.0.0-alpha9 now use stderr for standard notification messages
output = parse_out(out + err)
- module.exit_json(changed=has_changed(output), msg=output)
+ module.exit_json(changed=has_changed(output), msg=output, stdout=out+err)
# import module snippets
from ansible.module_utils.basic import *
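A minimal standalone sketch of the command assembly this composer hunk introduces: 'command' must be a single word and anything else belongs in 'arguments'. The helper name and sample paths are illustrative:

import re

def build_composer_cmd(php_path, composer_path, command, arguments="", options=None):
    # Mirrors the whitespace guard and string assembly from the hunk above
    if re.search(r"\s", command):
        raise ValueError("Use the 'arguments' param for passing arguments with the 'command'")
    options = options or []
    return "%s %s %s %s %s" % (php_path, composer_path, command, " ".join(options), arguments)

print(build_composer_cmd("/usr/bin/php", "/usr/local/bin/composer",
                         "require", "my/package", ["--no-ansi", "--no-interaction"]))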
diff --git a/packaging/language/cpanm.py b/packaging/language/cpanm.py
index 10bb66522ae..59677698069 100644
--- a/packaging/language/cpanm.py
+++ b/packaging/language/cpanm.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: cpanm
@@ -64,6 +68,12 @@
required: false
default: false
version_added: "2.0"
+ version:
+ description:
+ - minimum version of perl module to consider acceptable
+ required: false
+ default: null
+ version_added: "2.1"
system_lib:
description:
- Use this if you want to install modules to the system perl include path. You must be root or have "passwordless" sudo for this to work.
@@ -72,6 +82,12 @@
default: false
version_added: "2.0"
aliases: ['use_sudo']
+ executable:
+ description:
+ - Override the path to the cpanm executable
+ required: false
+ default: null
+ version_added: "2.1"
notes:
- Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.
author: "Franck Cuny (@franckcuny)"
@@ -79,78 +95,108 @@
EXAMPLES = '''
# install Dancer perl package
-- cpanm: name=Dancer
+- cpanm:
+ name: Dancer
# install version 0.99_05 of the Plack perl package
-- cpanm: name=MIYAGAWA/Plack-0.99_05.tar.gz
+- cpanm:
+ name: MIYAGAWA/Plack-0.99_05.tar.gz
# install Dancer into the specified locallib
-- cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib
+- cpanm:
+ name: Dancer
+ locallib: /srv/webapps/my_app/extlib
# install perl dependencies from local directory
-- cpanm: from_path=/srv/webapps/my_app/src/
+- cpanm:
+ from_path: /srv/webapps/my_app/src/
# install Dancer perl package without running the unit tests in indicated locallib
-- cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib
+- cpanm:
+ name: Dancer
+ notest: True
+ locallib: /srv/webapps/my_app/extlib
# install Dancer perl package from a specific mirror
-- cpanm: name=Dancer mirror=http://cpan.cpantesters.org/
+- cpanm:
+ name: Dancer
+ mirror: 'http://cpan.cpantesters.org/'
# install Dancer perl package into the system root path
-- cpanm: name=Dancer system_lib=yes
+- cpanm:
+ name: Dancer
+ system_lib: yes
+
+# install Dancer if it's not already installed
+# OR the installed version is older than version 1.0
+- cpanm:
+ name: Dancer
+ version: '1.0'
'''
-def _is_package_installed(module, name, locallib, cpanm):
+def _is_package_installed(module, name, locallib, cpanm, version):
cmd = ""
if locallib:
os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib
- cmd = "%s perl -M%s -e '1'" % (cmd, name)
+ cmd = "%s perl -e ' use %s" % (cmd, name)
+ if version:
+ cmd = "%s %s;'" % (cmd, version)
+ else:
+ cmd = "%s;'" % cmd
res, stdout, stderr = module.run_command(cmd, check_rc=False)
if res == 0:
return True
else:
return False
-def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only,
- installdeps, cpanm):
+def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo):
# this code should use "%s" like everything else and just return early but not fixing all of it now.
# don't copy stuff like this
if from_path:
- cmd = "{cpanm} {path}".format(cpanm=cpanm, path=from_path)
+ cmd = cpanm + " " + from_path
else:
- cmd = "{cpanm} {name}".format(cpanm=cpanm, name=name)
+ cmd = cpanm + " " + name
if notest is True:
- cmd = "{cmd} -n".format(cmd=cmd)
+ cmd = cmd + " -n"
if locallib is not None:
- cmd = "{cmd} -l {locallib}".format(cmd=cmd, locallib=locallib)
+ cmd = cmd + " -l " + locallib
if mirror is not None:
- cmd = "{cmd} --mirror {mirror}".format(cmd=cmd, mirror=mirror)
+ cmd = cmd + " --mirror " + mirror
if mirror_only is True:
- cmd = "{cmd} --mirror-only".format(cmd=cmd)
+ cmd = cmd + " --mirror-only"
if installdeps is True:
- cmd = "{cmd} --installdeps".format(cmd=cmd)
+ cmd = cmd + " --installdeps"
if use_sudo is True:
- cmd = "{cmd} --sudo".format(cmd=cmd)
+ cmd = cmd + " --sudo"
return cmd
+def _get_cpanm_path(module):
+ if module.params['executable']:
+ return module.params['executable']
+ else:
+ return module.get_bin_path('cpanm', True)
+
+
def main():
arg_spec = dict(
name=dict(default=None, required=False, aliases=['pkg']),
- from_path=dict(default=None, required=False),
+ from_path=dict(default=None, required=False, type='path'),
notest=dict(default=False, type='bool'),
- locallib=dict(default=None, required=False),
+ locallib=dict(default=None, required=False, type='path'),
mirror=dict(default=None, required=False),
mirror_only=dict(default=False, type='bool'),
installdeps=dict(default=False, type='bool'),
system_lib=dict(default=False, type='bool', aliases=['use_sudo']),
+ version=dict(default=None, required=False),
+ executable=dict(required=False, type='path'),
)
module = AnsibleModule(
@@ -158,7 +204,7 @@ def main():
required_one_of=[['name', 'from_path']],
)
- cpanm = module.get_bin_path('cpanm', True)
+ cpanm = _get_cpanm_path(module)
name = module.params['name']
from_path = module.params['from_path']
notest = module.boolean(module.params.get('notest', False))
@@ -167,22 +213,21 @@ def main():
mirror_only = module.params['mirror_only']
installdeps = module.params['installdeps']
use_sudo = module.params['system_lib']
+ version = module.params['version']
changed = False
- installed = _is_package_installed(module, name, locallib, cpanm)
+ installed = _is_package_installed(module, name, locallib, cpanm, version)
if not installed:
- out_cpanm = err_cpanm = ''
- cmd = _build_cmd_line(name, from_path, notest, locallib, mirror,
- mirror_only, installdeps, cpanm)
+ cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo)
rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)
if rc_cpanm != 0:
module.fail_json(msg=err_cpanm, cmd=cmd)
- if err_cpanm and 'is up to date' not in err_cpanm:
+ if 'is up to date' not in err_cpanm and 'is up to date' not in out_cpanm:
changed = True
module.exit_json(changed=changed, binary=cpanm, name=name)
@@ -190,4 +235,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
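The new cpanm version check leans on perl semantics: `use Module VERSION;` dies (non-zero exit) when the module is missing or older than VERSION, so the exit code doubles as an "installed and recent enough?" test. A simplified standalone sketch of the probe command (it omits the module's empty-string prefix):

def build_probe(name, version=None, locallib=None):
    # Returns the shell command and any extra environment needed
    env = {}
    if locallib:
        env["PERL5LIB"] = "%s/lib/perl5" % locallib
    if version:
        return "perl -e 'use %s %s;'" % (name, version), env
    return "perl -e 'use %s;'" % name, env

print(build_probe("Dancer", "1.0"))  # ("perl -e 'use Dancer 1.0;'", {})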
diff --git a/packaging/language/maven_artifact.py b/packaging/language/maven_artifact.py
index 658ad7f1173..d4a241d0e9d 100644
--- a/packaging/language/maven_artifact.py
+++ b/packaging/language/maven_artifact.py
@@ -25,6 +25,19 @@
import os
import hashlib
import sys
+import posixpath
+from urlparse import urlparse
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+try:
+ import boto3
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -39,6 +52,7 @@
requirements:
- "python >= 2.6"
- lxml
+ - boto3 if using an S3 repository (s3://...)
options:
group_id:
description:
@@ -54,30 +68,33 @@
required: false
default: latest
classifier:
- description:
+ description:
- The maven classifier coordinate
required: false
default: null
extension:
- description:
+ description:
- The maven type/extension coordinate
required: false
default: jar
repository_url:
- description:
- - The URL of the Maven Repository to download from
+ description:
+ - The URL of the Maven Repository to download from.
+ - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
required: false
default: http://repo1.maven.org/maven2
username:
description:
- - The username to authenticate as to the Maven Repository
+ - The username to authenticate as to the Maven Repository. Use AWS access key if the repository is hosted on S3
required: false
default: null
+ aliases: [ "aws_secret_key" ]
password:
description:
- - The password to authenticate with to the Maven Repository
+ - The password to authenticate with to the Maven Repository. Use AWS secret access key if the repository is hosted on S3
required: false
default: null
+ aliases: [ "aws_secret_access_key" ]
dest:
description:
- The path where the artifact should be written to
@@ -89,8 +106,14 @@
required: true
default: present
choices: [present,absent]
+ timeout:
+ description:
+ - Specifies a timeout in seconds for the connection attempt
+ required: false
+ default: 10
+ version_added: "2.3"
validate_certs:
- description:
+ description:
- If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists.
required: false
default: 'yes'
@@ -99,17 +122,35 @@
'''
EXAMPLES = '''
-# Download the latest version of the commons-collections artifact from Maven Central
-- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections dest=/tmp/commons-collections-latest.jar
-
-# Download Apache Commons-Collections 3.2 from Maven Central
-- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 dest=/tmp/commons-collections-3.2.jar
+# Download the latest version of the JUnit framework artifact from Maven Central
+- maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ dest: /tmp/junit-latest.jar
+
+# Download JUnit 4.11 from Maven Central
+- maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ version: 4.11
+ dest: /tmp/junit-4.11.jar
# Download an artifact from a private repository requiring authentication
-- maven_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass dest=/tmp/library-name-latest.jar
+- maven_artifact:
+ group_id: com.company
+ artifact_id: library-name
+ repository_url: 'https://repo.company.com/maven'
+ username: user
+ password: pass
+ dest: /tmp/library-name-latest.jar
# Download a WAR File to the Tomcat webapps directory to be deployed
-- maven_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven dest=/var/lib/tomcat7/webapps/web-app.war
+- maven_artifact:
+ group_id: com.company
+ artifact_id: web-app
+ extension: war
+ repository_url: 'https://repo.company.com/maven'
+ dest: /var/lib/tomcat7/webapps/web-app.war
'''
class Artifact(object):
@@ -133,9 +174,9 @@ def is_snapshot(self):
return self.version and self.version.endswith("SNAPSHOT")
def path(self, with_version=True):
- base = self.group_id.replace(".", "/") + "/" + self.artifact_id
+ base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id)
if with_version and self.version:
- return base + "/" + self.version
+ return posixpath.join(base, self.version)
else:
return base
@@ -195,14 +236,20 @@ def _find_latest_version_available(self, artifact):
return v[0]
def find_uri_for_artifact(self, artifact):
+ if artifact.version == "latest":
+ artifact.version = self._find_latest_version_available(artifact)
+
if artifact.is_snapshot():
path = "/%s/maven-metadata.xml" % (artifact.path())
xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0]
buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
+ for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
+ if len(snapshotArtifact.xpath("classifier/text()")) > 0 and snapshotArtifact.xpath("classifier/text()")[0] == artifact.classifier and len(snapshotArtifact.xpath("extension/text()")) > 0 and snapshotArtifact.xpath("extension/text()")[0] == artifact.extension:
+ return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + buildNumber))
- else:
- return self._uri_for_artifact(artifact)
+
+ return self._uri_for_artifact(artifact, artifact.version)
def _uri_for_artifact(self, artifact, version=None):
if artifact.is_snapshot() and not version:
@@ -210,19 +257,30 @@ def _uri_for_artifact(self, artifact, version=None):
elif not artifact.is_snapshot():
version = artifact.version
if artifact.classifier:
- return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)
- return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "." + artifact.extension
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)
def _request(self, url, failmsg, f):
+ url_to_use = url
+ parsed_url = urlparse(url)
+ if parsed_url.scheme == 's3':
+ bucket_name = parsed_url.netloc
+ key_name = parsed_url.path[1:]
+ client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
+ url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)
+
+ req_timeout = self.module.params.get('timeout')
+
# Hack to add parameters in the way that fetch_url expects
self.module.params['url_username'] = self.module.params.get('username', '')
self.module.params['url_password'] = self.module.params.get('password', '')
self.module.params['http_agent'] = self.module.params.get('user_agent', None)
- response, info = fetch_url(self.module, url)
+ response, info = fetch_url(self.module, url_to_use, timeout=req_timeout)
if info['status'] != 200:
- raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url)
+ raise ValueError(failmsg + " because of " + info['msg'] + " for URL " + url_to_use)
else:
return f(response)
@@ -237,9 +295,10 @@ def download(self, artifact, filename=None):
if not self.verify_md5(filename, url + ".md5"):
response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r)
if response:
- with open(filename, 'w') as f:
- # f.write(response.read())
- self._write_chunks(response, f, report_hook=self.chunk_report)
+ f = open(filename, 'w')
+ # f.write(response.read())
+ self._write_chunks(response, f, report_hook=self.chunk_report)
+ f.close()
return True
else:
return False
@@ -283,9 +342,10 @@ def verify_md5(self, file, remote_md5):
def _local_md5(self, file):
md5 = hashlib.md5()
- with open(file, 'rb') as f:
- for chunk in iter(lambda: f.read(8192), ''):
- md5.update(chunk)
+ f = open(file, 'rb')
+ for chunk in iter(lambda: f.read(8192), ''):
+ md5.update(chunk)
+ f.close()
return md5.hexdigest()
@@ -294,18 +354,27 @@ def main():
argument_spec = dict(
group_id = dict(default=None),
artifact_id = dict(default=None),
- version = dict(default=None),
+ version = dict(default="latest"),
classifier = dict(default=None),
- extension = dict(default=None, required=True),
+ extension = dict(default='jar'),
repository_url = dict(default=None),
- username = dict(default=None),
- password = dict(default=None),
+ username = dict(default=None, aliases=['aws_secret_key']),
+ password = dict(default=None, no_log=True, aliases=['aws_secret_access_key']),
state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state
- dest = dict(default=None),
+ timeout = dict(default=10, type='int'),
+ dest = dict(type="path", default=None),
validate_certs = dict(required=False, default=True, type='bool'),
)
)
+ try:
+ parsed_url = urlparse(module.params["repository_url"])
+ except AttributeError as e:
+ module.fail_json(msg='URL parsing went wrong: %s' % e)
+
+ if parsed_url.scheme == 's3' and not HAS_BOTO:
+ module.fail_json(msg='boto3 is required for this module when using s3:// repository URLs')
+
group_id = module.params["group_id"]
artifact_id = module.params["artifact_id"]
version = module.params["version"]
@@ -330,8 +399,8 @@ def main():
prev_state = "absent"
if os.path.isdir(dest):
- dest = dest + "/" + artifact_id + "-" + version + "." + extension
- if os.path.lexists(dest):
+ dest = posixpath.join(dest, artifact_id + "-" + version + "." + extension)
+ if os.path.lexists(dest) and downloader.verify_md5(dest, downloader.find_uri_for_artifact(artifact) + '.md5'):
prev_state = "present"
else:
path = os.path.dirname(dest)
@@ -350,8 +419,6 @@ def main():
module.fail_json(msg=e.args[0])
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
+
if __name__ == '__main__':
main()
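For clarity, a minimal sketch of the S3 branch added to _request(): an s3:// repository URL is turned into a short-lived presigned HTTPS URL that fetch_url can then download normally. Bucket, key, region and credentials below are placeholders; boto3 signs locally, so generating the URL needs no network access:

try:
    from urllib.parse import urlparse  # Python 3
except ImportError:
    from urlparse import urlparse      # Python 2, as in the module

import boto3

url = "s3://my-maven-bucket/releases/junit/junit/4.11/junit-4.11.jar"
parsed = urlparse(url)
client = boto3.client('s3',
                      region_name='us-east-1',        # placeholder region
                      aws_access_key_id='AKIA...',    # 'username' param
                      aws_secret_access_key='...')    # 'password' param
presigned = client.generate_presigned_url(
    'get_object',
    Params={'Bucket': parsed.netloc, 'Key': parsed.path[1:]},
    ExpiresIn=10,  # same 10 second window the module uses
)
print(presigned)  # https:// URL that fetch_url() can retrieve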
diff --git a/packaging/language/npm.py b/packaging/language/npm.py
index d804efff331..b1df88e60a2 100644
--- a/packaging/language/npm.py
+++ b/packaging/language/npm.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: npm
@@ -78,28 +82,46 @@
EXAMPLES = '''
description: Install "coffee-script" node.js package.
-- npm: name=coffee-script path=/app/location
+- npm:
+ name: coffee-script
+ path: /app/location
description: Install "coffee-script" node.js package on version 1.6.1.
-- npm: name=coffee-script version=1.6.1 path=/app/location
+- npm:
+ name: coffee-script
+ version: '1.6.1'
+ path: /app/location
description: Install "coffee-script" node.js package globally.
-- npm: name=coffee-script global=yes
+- npm:
+ name: coffee-script
+ global: yes
description: Remove the globally package "coffee-script".
-- npm: name=coffee-script global=yes state=absent
+- npm:
+ name: coffee-script
+ global: yes
+ state: absent
description: Install "coffee-script" node.js package from custom registry.
-- npm: name=coffee-script registry=http://registry.mysite.com
+- npm:
+ name: coffee-script
+ registry: 'http://registry.mysite.com'
description: Install packages based on package.json.
-- npm: path=/app/location
+- npm:
+ path: /app/location
description: Update packages based on package.json to their latest version.
-- npm: path=/app/location state=latest
+- npm:
+ path: /app/location
+ state: latest
description: Install packages based on package.json using the npm installed with nvm v0.10.1.
-- npm: path=/app/location executable=/opt/nvm/v0.10.1/bin/npm state=present
+- npm:
+ path: /app/location
+ executable: /opt/nvm/v0.10.1/bin/npm
+ state: present
'''
import os
@@ -107,7 +129,12 @@
try:
import json
except ImportError:
- import simplejson as json
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
class Npm(object):
def __init__(self, module, **kwargs):
@@ -126,7 +153,7 @@ def __init__(self, module, **kwargs):
self.executable = [module.get_bin_path('npm', True)]
if kwargs['version']:
- self.name_version = self.name + '@' + self.version
+ self.name_version = self.name + '@' + str(self.version)
else:
self.name_version = self.name
@@ -149,7 +176,6 @@ def _exec(self, args, run_in_check_mode=False, check_rc=True):
#If path is specified, cd into that path and run the command.
cwd = None
if self.path:
- self.path = os.path.abspath(os.path.expanduser(self.path))
if not os.path.exists(self.path):
os.makedirs(self.path)
if not os.path.isdir(self.path):
@@ -207,10 +233,10 @@ def list_outdated(self):
def main():
arg_spec = dict(
name=dict(default=None),
- path=dict(default=None),
+ path=dict(default=None, type='path'),
version=dict(default=None),
production=dict(default='no', type='bool'),
- executable=dict(default=None),
+ executable=dict(default=None, type='path'),
registry=dict(default=None),
state=dict(default='present', choices=['present', 'absent', 'latest']),
ignore_scripts=dict(default=False, type='bool'),
@@ -248,9 +274,12 @@ def main():
elif state == 'latest':
installed, missing = npm.list()
outdated = npm.list_outdated()
- if len(missing) or len(outdated):
+ if len(missing):
changed = True
npm.install()
+ if len(outdated):
+ changed = True
+ npm.update()
else: #absent
installed, missing = npm.list()
if name in installed:
@@ -261,4 +290,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
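A condensed sketch of the reworked state=latest flow shown above: missing packages are installed, while outdated ones are now upgraded via `npm update` instead of being re-run through install. The FakeNpm stub stands in for the module's Npm class:

def ensure_latest(npm):
    changed = False
    installed, missing = npm.list()
    outdated = npm.list_outdated()
    if missing:
        changed = True
        npm.install()
    if outdated:
        changed = True
        npm.update()  # previously both cases re-ran install()
    return changed

class FakeNpm(object):
    def list(self): return (['coffee-script'], ['left-pad'])
    def list_outdated(self): return ['coffee-script']
    def install(self): print('npm install')
    def update(self): print('npm update')

print(ensure_latest(FakeNpm()))  # True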
diff --git a/packaging/language/pear.py b/packaging/language/pear.py
index 5762f9c815c..0379538874d 100644
--- a/packaging/language/pear.py
+++ b/packaging/language/pear.py
@@ -20,6 +20,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: pear
@@ -45,16 +49,24 @@
EXAMPLES = '''
# Install pear package
-- pear: name=Net_URL2 state=present
+- pear:
+ name: Net_URL2
+ state: present
# Install pecl package
-- pear: name=pecl/json_post state=present
+- pear:
+ name: pecl/json_post
+ state: present
# Upgrade package
-- pear: name=Net_URL2 state=latest
+- pear:
+ name: Net_URL2
+ state: latest
# Remove packages
-- pear: name=Net_URL2,pecl/json_post state=absent
+- pear:
+ name: Net_URL2,pecl/json_post
+ state: absent
'''
import os
@@ -224,4 +236,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/packaging/os/apk.py b/packaging/os/apk.py
index ec0e3908faf..8d8c5a6f808 100644
--- a/packaging/os/apk.py
+++ b/packaging/os/apk.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: apk
@@ -42,7 +46,7 @@
choices: [ "present", "absent", "latest" ]
update_cache:
description:
- - Update repository indexes. Can be run with other steps or on it's own.
+ - Update repository indexes. Can be run with other steps or on its own.
required: false
default: no
choices: [ "yes", "no" ]
@@ -52,38 +56,60 @@
required: false
default: no
choices: [ "yes", "no" ]
+notes:
+ - '"name" and "upgrade" are mutually exclusive.'
'''
EXAMPLES = '''
# Update repositories and install "foo" package
-- apk: name=foo update_cache=yes
+- apk:
+ name: foo
+ update_cache: yes
# Update repositories and install "foo" and "bar" packages
-- apk: name=foo,bar update_cache=yes
+- apk:
+ name: foo,bar
+ update_cache: yes
# Remove "foo" package
-- apk: name=foo state=absent
+- apk:
+ name: foo
+ state: absent
# Remove "foo" and "bar" packages
-- apk: name=foo,bar state=absent
+- apk:
+ name: foo,bar
+ state: absent
# Install the package "foo"
-- apk: name=foo state=present
+- apk:
+ name: foo
+ state: present
# Install the packages "foo" and "bar"
-- apk: name=foo,bar state=present
+- apk:
+ name: foo,bar
+ state: present
# Update repositories and update package "foo" to latest version
-- apk: name=foo state=latest update_cache=yes
+- apk:
+ name: foo
+ state: latest
+ update_cache: yes
# Update repositories and update packages "foo" and "bar" to latest versions
-- apk: name=foo,bar state=latest update_cache=yes
+- apk:
+ name: foo,bar
+ state: latest
+ update_cache: yes
# Update all installed packages to the latest versions
-- apk: upgrade=yes
+- apk:
+ upgrade: yes
# Update repositories as a separate step
-- apk: update_cache=yes
+- apk:
+ update_cache: yes
'''
import os
@@ -114,6 +140,23 @@ def query_latest(module, name):
return False
return True
+def query_virtual(module, name):
+ cmd = "%s -v info --description %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ search_pattern = "^%s: virtual meta package" % (name)
+ if re.search(search_pattern, stdout):
+ return True
+ return False
+
+def get_dependencies(module, name):
+ cmd = "%s -v info --depends %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ dependencies = stdout.split()
+ if len(dependencies) > 1:
+ return dependencies[1:]
+ else:
+ return []
+
def upgrade_packages(module):
if module.check_mode:
cmd = "%s upgrade --simulate" % (APK_PATH)
@@ -128,29 +171,40 @@ def upgrade_packages(module):
def install_packages(module, names, state):
upgrade = False
- uninstalled = []
+ to_install = []
+ to_upgrade = []
for name in names:
- if not query_package(module, name):
- uninstalled.append(name)
- elif state == 'latest' and not query_latest(module, name):
- upgrade = True
- if not uninstalled and not upgrade:
+ # Check if virtual package
+ if query_virtual(module, name):
+ # Get virtual package dependencies
+ dependencies = get_dependencies(module, name)
+ for dependency in dependencies:
+ if state == 'latest' and not query_latest(module, dependency):
+ to_upgrade.append(dependency)
+ else:
+ if not query_package(module, name):
+ to_install.append(name)
+ elif state == 'latest' and not query_latest(module, name):
+ to_upgrade.append(name)
+ if to_upgrade:
+ upgrade = True
+ if not to_install and not upgrade:
module.exit_json(changed=False, msg="package(s) already installed")
- names = " ".join(uninstalled)
+ packages = " ".join(to_install) + " ".join(to_upgrade)
if upgrade:
if module.check_mode:
- cmd = "%s add --upgrade --simulate %s" % (APK_PATH, names)
+ cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
else:
- cmd = "%s add --upgrade %s" % (APK_PATH, names)
+ cmd = "%s add --upgrade %s" % (APK_PATH, packages)
else:
if module.check_mode:
- cmd = "%s add --simulate %s" % (APK_PATH, names)
+ cmd = "%s add --simulate %s" % (APK_PATH, packages)
else:
- cmd = "%s add %s" % (APK_PATH, names)
+ cmd = "%s add %s" % (APK_PATH, packages)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
- module.fail_json(msg="failed to install %s" % (names))
- module.exit_json(changed=True, msg="installed %s package(s)" % (names))
+ module.fail_json(msg="failed to install %s" % (packages))
+ module.exit_json(changed=True, msg="installed %s package(s)" % (packages))
def remove_packages(module, names):
installed = []
@@ -168,7 +222,7 @@ def remove_packages(module, names):
if rc != 0:
module.fail_json(msg="failed to remove %s package(s)" % (names))
module.exit_json(changed=True, msg="removed %s package(s)" % (names))
-
+
# ==========================================
# Main control flow.
@@ -177,13 +231,17 @@ def main():
argument_spec = dict(
state = dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
name = dict(type='list'),
- update_cache = dict(default='no', choices=BOOLEANS, type='bool'),
- upgrade = dict(default='no', choices=BOOLEANS, type='bool'),
+ update_cache = dict(default='no', type='bool'),
+ upgrade = dict(default='no', type='bool'),
),
required_one_of = [['name', 'update_cache', 'upgrade']],
+ mutually_exclusive = [['name', 'upgrade']],
supports_check_mode = True
)
+ # Set LANG env since we parse stdout
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
global APK_PATH
APK_PATH = module.get_bin_path('apk', required=True)
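A standalone sketch of the virtual-package handling added above: `apk -v info --description NAME` identifies meta packages, whose dependencies are then evaluated individually for install or upgrade. The sample output strings are illustrative:

import re

def is_virtual(name, description_stdout):
    return bool(re.search("^%s: virtual meta package" % name, description_stdout))

def dependencies(depends_stdout):
    # First token is the package itself; the rest are its dependencies
    parts = depends_stdout.split()
    return parts[1:] if len(parts) > 1 else []

desc = "build-base: virtual meta package"
deps = "build-base-0.5-r2 gcc make musl-dev"
if is_virtual("build-base", desc):
    print(dependencies(deps))  # ['gcc', 'make', 'musl-dev']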
diff --git a/packaging/os/dnf.py b/packaging/os/dnf.py
index c4522f9105a..016fdf60453 100644
--- a/packaging/os/dnf.py
+++ b/packaging/os/dnf.py
@@ -20,6 +20,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: dnf
@@ -86,7 +90,7 @@
# informational: requirements for nodes
requirements:
- "python >= 2.6"
- - dnf
+ - python-dnf
author:
- '"Igor Gnatenko (@ignatenkobrain)" '
- '"Cristian van Ee (@DJMuggs)" '
@@ -94,42 +98,82 @@
EXAMPLES = '''
- name: install the latest version of Apache
- dnf: name=httpd state=latest
+ dnf:
+ name: httpd
+ state: latest
- name: remove the Apache package
- dnf: name=httpd state=absent
+ dnf:
+ name: httpd
+ state: absent
- name: install the latest version of Apache from the testing repo
- dnf: name=httpd enablerepo=testing state=present
+ dnf:
+ name: httpd
+ enablerepo: testing
+ state: present
- name: upgrade all packages
- dnf: name=* state=latest
+ dnf:
+ name: '*'
+ state: latest
- name: install the nginx rpm from a remote repo
- dnf: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
+ dnf:
+ name: 'http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm'
+ state: present
- name: install nginx rpm from a local file
- dnf: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
+ dnf:
+ name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ state: present
- name: install the 'Development tools' package group
- dnf: name="@Development tools" state=present
-
+ dnf:
+ name: '@Development tools'
+ state: present
'''
import os
try:
import dnf
- from dnf import cli, const, exceptions, subject, util
+ import dnf.cli
+ import dnf.const
+ import dnf.exceptions
+ import dnf.subject
+ import dnf.util
HAS_DNF = True
except ImportError:
HAS_DNF = False
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import PY2
-def _fail_if_no_dnf(module):
- """Fail if unable to import dnf."""
+
+def _ensure_dnf(module):
if not HAS_DNF:
- module.fail_json(
- msg="`python-dnf` is not installed, but it is required for the Ansible dnf module.")
+ if PY2:
+ package = 'python2-dnf'
+ else:
+ package = 'python3-dnf'
+
+ if module.check_mode:
+ module.fail_json(msg="`{0}` is not installed, but it is required"
+ " for the Ansible dnf module.".format(package))
+
+ module.run_command(['dnf', 'install', '-y', package], check_rc=True)
+ global dnf
+ try:
+ import dnf
+ import dnf.cli
+ import dnf.const
+ import dnf.exceptions
+ import dnf.subject
+ import dnf.util
+ except ImportError:
+ module.fail_json(msg="Could not import the dnf python module."
+ " Please install `{0}` package.".format(package))
def _configure_base(module, base, conf_file, disable_gpg_check):
@@ -176,11 +220,10 @@ def _specify_repositories(base, disablerepo, enablerepo):
def _base(module, conf_file, disable_gpg_check, disablerepo, enablerepo):
"""Return a fully configured dnf Base object."""
- _fail_if_no_dnf(module)
base = dnf.Base()
_configure_base(module, base, conf_file, disable_gpg_check)
_specify_repositories(base, disablerepo, enablerepo)
- base.fill_sack()
+ base.fill_sack(load_system_repo='auto')
return base
@@ -219,7 +262,7 @@ def list_items(module, base, command):
for repo in base.repos.iter_enabled()]
# Return any matching packages
else:
- packages = subject.Subject(command).get_best_query(base.sack)
+ packages = dnf.subject.Subject(command).get_best_query(base.sack)
results = [_package_dict(package) for package in packages]
module.exit_json(results=results)
@@ -229,85 +272,167 @@ def _mark_package_install(module, base, pkg_spec):
"""Mark the package for install."""
try:
base.install(pkg_spec)
- except exceptions.MarkingError:
+ except dnf.exceptions.MarkingError:
module.fail_json(msg="No package {} available.".format(pkg_spec))
-def ensure(module, base, state, names):
- if not util.am_i_root():
- module.fail_json(msg="This command has to be run under the root user.")
+def _parse_spec_group_file(names):
+ pkg_specs, grp_specs, filenames = [], [], []
+ for name in names:
+ if name.endswith(".rpm"):
+ filenames.append(name)
+ elif name.startswith("@"):
+ grp_specs.append(name[1:])
+ else:
+ pkg_specs.append(name)
+ return pkg_specs, grp_specs, filenames
+
+def _install_remote_rpms(base, filenames):
+ if int(dnf.__version__.split(".")[0]) >= 2:
+ pkgs = list(sorted(base.add_remote_rpms(list(filenames)), reverse=True))
+ else:
+ pkgs = []
+ for filename in filenames:
+ pkgs.append(base.add_remote_rpm(filename))
+ for pkg in pkgs:
+ base.package_install(pkg)
+
+
+def ensure(module, base, state, names):
+ # Accumulate failures. Package management modules install what they can
+ # and fail with a message about what they can't.
+ failures = []
+ allow_erasing = False
if names == ['*'] and state == 'latest':
base.upgrade_all()
else:
- pkg_specs, group_specs, filenames = cli.commands.parse_spec_group_file(
- names)
+ pkg_specs, group_specs, filenames = _parse_spec_group_file(names)
if group_specs:
base.read_comps()
+ pkg_specs = [p.strip() for p in pkg_specs]
+ filenames = [f.strip() for f in filenames]
groups = []
- for group_spec in group_specs:
+ environments = []
+ for group_spec in (g.strip() for g in group_specs):
group = base.comps.group_by_pattern(group_spec)
if group:
groups.append(group)
else:
- module.fail_json(
- msg="No group {} available.".format(group_spec))
+ environment = base.comps.environment_by_pattern(group_spec)
+ if environment:
+ environments.append(environment.id)
+ else:
+ module.fail_json(
+ msg="No group {} available.".format(group_spec))
if state in ['installed', 'present']:
# Install files.
- for filename in filenames:
- base.package_install(base.add_remote_rpm(filename))
+ _install_remote_rpms(base, filenames)
+
# Install groups.
for group in groups:
- base.group_install(group, const.GROUP_PACKAGE_TYPES)
+ try:
+ base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
+ except dnf.exceptions.Error as e:
+ # In dnf 2.0 if all the mandatory packages in a group do
+ # not install, an error is raised. We want to capture
+ # this but still install as much as possible.
+ failures.append((group, e))
+
+ for environment in environments:
+ try:
+ base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
+ except dnf.exceptions.Error as e:
+ failures.append((environment, e))
+
# Install packages.
for pkg_spec in pkg_specs:
_mark_package_install(module, base, pkg_spec)
elif state == 'latest':
# "latest" is same as "installed" for filenames.
- for filename in filenames:
- base.package_install(base.add_remote_rpm(filename))
+ _install_remote_rpms(base, filenames)
+
for group in groups:
try:
- base.group_upgrade(group)
- except exceptions.CompsError:
- # If not already installed, try to install.
- base.group_install(group, const.GROUP_PACKAGE_TYPES)
- for pkg_spec in pkg_specs:
+ try:
+ base.group_upgrade(group)
+ except dnf.exceptions.CompsError:
+ # If not already installed, try to install.
+ base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
+ except dnf.exceptions.Error as e:
+ failures.append((group, e))
+
+ for environment in environments:
try:
- base.upgrade(pkg_spec)
- except dnf.exceptions.MarkingError:
- # If not already installed, try to install.
- _mark_package_install(module, base, pkg_spec)
+ try:
+ base.environment_upgrade(environment)
+ except dnf.exceptions.CompsError:
+ # If not already installed, try to install.
+ base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
+ except dnf.exceptions.Error as e:
+ failures.append((environment, e))
+
+ for pkg_spec in pkg_specs:
+ # best effort causes the latest package to be installed
+ # even if it was not previously installed
+ base.conf.best = True
+ base.install(pkg_spec)
else:
+ # state == absent
if filenames:
module.fail_json(
msg="Cannot remove paths -- please specify package name.")
- installed = base.sack.query().installed()
for group in groups:
- if installed.filter(name=group.name):
+ try:
base.group_remove(group)
+ except dnf.exceptions.CompsError:
+ # Group is already uninstalled.
+ pass
+
+ for environment in environments:
+ try:
+ base.environment_remove(environment)
+ except dnf.exceptions.CompsError:
+ # Environment is already uninstalled.
+ pass
+
+ installed = base.sack.query().installed()
for pkg_spec in pkg_specs:
if installed.filter(name=pkg_spec):
base.remove(pkg_spec)
- if not base.resolve():
+ # Like the dnf CLI we want to allow recursive removal of dependent
+ # packages
+ allow_erasing = True
+
+ if not base.resolve(allow_erasing=allow_erasing):
+ if failures:
+ module.fail_json(msg='Failed to install some of the specified packages',
+ failures=failures)
module.exit_json(msg="Nothing to do")
else:
if module.check_mode:
+ if failures:
+ module.fail_json(msg='Failed to install some of the specified packages',
+ failures=failures)
module.exit_json(changed=True)
+
base.download_packages(base.transaction.install_set)
base.do_transaction()
response = {'changed': True, 'results': []}
for package in base.transaction.install_set:
- response['results'].append("Installed: {}".format(package))
+ response['results'].append("Installed: {0}".format(package))
for package in base.transaction.remove_set:
- response['results'].append("Removed: {}".format(package))
+ response['results'].append("Removed: {0}".format(package))
+ if failures:
+ module.fail_json(msg='Failed to install some of the specified packages',
+ failures=failures)
module.exit_json(**response)
@@ -323,23 +448,32 @@ def main():
enablerepo=dict(type='list', default=[]),
disablerepo=dict(type='list', default=[]),
list=dict(),
- conf_file=dict(default=None),
+ conf_file=dict(default=None, type='path'),
disable_gpg_check=dict(default=False, type='bool'),
),
required_one_of=[['name', 'list']],
mutually_exclusive=[['name', 'list']],
supports_check_mode=True)
params = module.params
- base = _base(
- module, params['conf_file'], params['disable_gpg_check'],
- params['disablerepo'], params['enablerepo'])
+
+ _ensure_dnf(module)
+
if params['list']:
+ base = _base(
+ module, params['conf_file'], params['disable_gpg_check'],
+ params['disablerepo'], params['enablerepo'])
list_items(module, base, params['list'])
else:
+ # Note: base takes a long time to run so we want to check for failure
+ # before running it.
+ if not dnf.util.am_i_root():
+ module.fail_json(msg="This command has to be run under the root user.")
+ base = _base(
+ module, params['conf_file'], params['disable_gpg_check'],
+ params['disablerepo'], params['enablerepo'])
+
ensure(module, base, params['state'], params['name'])
-# import module snippets
-from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
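The helper below reproduces _parse_spec_group_file() from the hunk above as a runnable snippet, since this classification rule anchors the rewrite: names ending in .rpm are files, names starting with '@' are groups or environments, everything else is a plain package spec:

def parse_spec_group_file(names):
    pkg_specs, grp_specs, filenames = [], [], []
    for name in names:
        if name.endswith(".rpm"):
            filenames.append(name)
        elif name.startswith("@"):
            grp_specs.append(name[1:])
        else:
            pkg_specs.append(name)
    return pkg_specs, grp_specs, filenames

print(parse_spec_group_file(["httpd", "@Development tools", "/tmp/nginx.rpm"]))
# (['httpd'], ['Development tools'], ['/tmp/nginx.rpm'])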
diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py
old mode 100644
new mode 100755
index 91888ba6bca..c44ccabbe6f
--- a/packaging/os/homebrew.py
+++ b/packaging/os/homebrew.py
@@ -3,6 +3,7 @@
# (c) 2013, Andrew Dunham
# (c) 2013, Daniel Jaouen
+# (c) 2015, Indrajit Raychaudhuri
#
# Based on macports (Jimmy Tang )
#
@@ -19,12 +20,19 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: homebrew
author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
- "Daniel Jaouen (@danieljaouen)"
- "Andrew Dunham (@andrew-d)"
+requirements:
+ - "python >= 2.6"
short_description: Package manager for Homebrew
description:
- Manages Homebrew packages
@@ -35,6 +43,12 @@
- name of package to install/remove
required: false
default: None
+ aliases: ['pkg', 'package', 'formula']
+ path:
+ description:
+ - "':' separated list of paths to search for 'brew' executable. Since A package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system."
+ required: false
+ default: '/usr/local/bin'
state:
description:
- state of the package
@@ -45,37 +59,82 @@
description:
- update homebrew itself first
required: false
- default: "no"
+ default: no
choices: [ "yes", "no" ]
+ aliases: ['update-brew']
upgrade_all:
description:
- upgrade all homebrew packages
required: false
- default: "no"
+ default: no
choices: [ "yes", "no" ]
+ aliases: ['upgrade']
install_options:
description:
- options flags to install a package
required: false
default: null
+ aliases: ['options']
version_added: "1.4"
notes: []
'''
EXAMPLES = '''
-- homebrew: name=foo state=present
-- homebrew: name=foo state=present update_homebrew=yes
-- homebrew: name=foo state=latest update_homebrew=yes
-- homebrew: update_homebrew=yes upgrade_all=yes
-- homebrew: name=foo state=head
-- homebrew: name=foo state=linked
-- homebrew: name=foo state=absent
-- homebrew: name=foo,bar state=absent
-- homebrew: name=foo state=present install_options=with-baz,enable-debug
+# Install formula foo with 'brew' in default path (C(/usr/local/bin))
+- homebrew:
+ name: foo
+ state: present
+
+# Install formula foo with 'brew' in alternate path C(/my/other/location/bin)
+- homebrew:
+ name: foo
+ path: /my/other/location/bin
+ state: present
+
+# Update homebrew first and install formula foo with 'brew' in default path
+- homebrew:
+ name: foo
+ state: present
+ update_homebrew: yes
+
+# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path
+- homebrew:
+ name: foo
+ state: latest
+ update_homebrew: yes
+
+# Update homebrew and upgrade all packages
+- homebrew:
+ update_homebrew: yes
+ upgrade_all: yes
+
+# Miscellaneous other examples
+- homebrew:
+ name: foo
+ state: head
+
+- homebrew:
+ name: foo
+ state: linked
+
+- homebrew:
+ name: foo
+ state: absent
+
+- homebrew:
+ name: foo,bar
+ state: absent
+
+- homebrew:
+ name: foo
+ state: present
+ install_options: with-baz,enable-debug
'''
import os.path
import re
+from ansible.module_utils.six import iteritems
+
# exceptions -------------------------------------------------------------- {{{
class HomebrewException(Exception):
@@ -119,6 +178,7 @@ class Homebrew(object):
/ # slash (for taps)
\+ # plusses
- # dashes
+ : # colons (for URLs)
'''
INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
@@ -300,7 +360,7 @@ def current_package(self, package):
return package
# /class properties -------------------------------------------- }}}
- def __init__(self, module, path=None, packages=None, state=None,
+ def __init__(self, module, path, packages=None, state=None,
update_homebrew=False, upgrade_all=False,
install_options=None):
if not install_options:
@@ -322,17 +382,12 @@ def _setup_status_vars(self):
self.message = ''
def _setup_instance_vars(self, **kwargs):
- for key, val in kwargs.iteritems():
+ for key, val in iteritems(kwargs):
setattr(self, key, val)
def _prep(self):
- self._prep_path()
self._prep_brew_path()
- def _prep_path(self):
- if not self.path:
- self.path = ['/usr/local/bin']
-
def _prep_brew_path(self):
if not self.module:
self.brew_path = None
@@ -394,18 +449,17 @@ def _current_package_is_installed(self):
return False
- def _outdated_packages(self):
+ def _current_package_is_outdated(self):
+ if not self.valid_package(self.current_package):
+ return False
+
rc, out, err = self.module.run_command([
self.brew_path,
'outdated',
+ self.current_package,
])
- return [line.split(' ')[0].strip() for line in out.split('\n') if line]
- def _current_package_is_outdated(self):
- if not self.valid_package(self.current_package):
- return False
-
- return self.current_package in self._outdated_packages()
+ return rc != 0
def _current_package_is_installed_from_head(self):
if not Homebrew.valid_package(self.current_package):
@@ -763,8 +817,16 @@ def _unlink_packages(self):
def main():
module = AnsibleModule(
argument_spec=dict(
- name=dict(aliases=["pkg"], required=False),
- path=dict(required=False),
+ name=dict(
+ aliases=["pkg", "package", "formula"],
+ required=False,
+ type='list',
+ ),
+ path=dict(
+ default="/usr/local/bin",
+ required=False,
+ type='path',
+ ),
state=dict(
default="present",
choices=[
@@ -775,12 +837,12 @@ def main():
],
),
update_homebrew=dict(
- default="no",
+ default=False,
aliases=["update-brew"],
type='bool',
),
upgrade_all=dict(
- default="no",
+ default=False,
aliases=["upgrade"],
type='bool',
),
@@ -792,18 +854,19 @@ def main():
),
supports_check_mode=True,
)
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
p = module.params
if p['name']:
- packages = p['name'].split(',')
+ packages = p['name']
else:
packages = None
path = p['path']
if path:
path = path.split(':')
- else:
- path = ['/usr/local/bin']
state = p['state']
if state in ('present', 'installed'):
@@ -839,4 +902,3 @@ def main():
if __name__ == '__main__':
main()
-
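A sketch of the simplified outdated check above: `brew outdated <formula>` exits non-zero when a newer version is available, so the return code alone answers the question and the old output parsing goes away. subprocess stands in for module.run_command here, and running it naturally requires brew on the machine:

import subprocess

def package_is_outdated(brew_path, package):
    # Mirrors the hunk's `return rc != 0` convention
    rc = subprocess.call([brew_path, 'outdated', package])
    return rc != 0

# e.g. package_is_outdated('/usr/local/bin/brew', 'git')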
diff --git a/packaging/os/homebrew_cask.py b/packaging/os/homebrew_cask.py
old mode 100644
new mode 100755
index e1b721a97b4..86d7f35e0ca
--- a/packaging/os/homebrew_cask.py
+++ b/packaging/os/homebrew_cask.py
@@ -2,6 +2,7 @@
# -*- coding: utf-8 -*-
# (c) 2013, Daniel Jaouen
+# (c) 2016, Indrajit Raychaudhuri
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -16,10 +17,19 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: homebrew_cask
-author: "Daniel Jaouen (@danieljaouen)"
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+ - "Enric Lluelles (@enriclluelles)"
+requirements:
+ - "python >= 2.6"
short_description: Install/uninstall homebrew casks.
description:
- Manages Homebrew casks.
@@ -29,21 +39,65 @@
description:
- name of cask to install/remove
required: true
+ aliases: ['pkg', 'package', 'cask']
+ path:
+ description:
+ - "':' separated list of paths to search for 'brew' executable."
+ required: false
+ default: '/usr/local/bin'
state:
description:
- state of the cask
choices: [ 'present', 'absent' ]
required: false
default: present
+ update_homebrew:
+ description:
+ - update homebrew itself first. Note that C(brew cask update) is
+ a synonym for C(brew update).
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ aliases: ['update-brew']
+ version_added: "2.2"
+ install_options:
+ description:
+ - options flags to install a package
+ required: false
+ default: null
+ aliases: ['options']
+ version_added: "2.2"
'''
EXAMPLES = '''
-- homebrew_cask: name=alfred state=present
-- homebrew_cask: name=alfred state=absent
+- homebrew_cask:
+ name: alfred
+ state: present
+
+- homebrew_cask:
+ name: alfred
+ state: absent
+
+- homebrew_cask:
+ name: alfred
+ state: present
+ install_options: 'appdir=/Applications'
+
+- homebrew_cask:
+ name: alfred
+ state: present
+ install_options: 'debug,appdir=/Applications'
+
+- homebrew_cask:
+ name: alfred
+ state: absent
+ install_options: force
'''
import os.path
import re
+from ansible.module_utils.six import iteritems
+
# exceptions -------------------------------------------------------------- {{{
class HomebrewCaskException(Exception):
@@ -69,6 +123,7 @@ class HomebrewCask(object):
\s # spaces
: # colons
{sep} # the OS-specific path separator
+ . # dots
- # dashes
'''.format(sep=os.path.sep)
@@ -76,11 +131,14 @@ class HomebrewCask(object):
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
\s # spaces
{sep} # the OS-specific path separator
+ . # dots
- # dashes
'''.format(sep=os.path.sep)
VALID_CASK_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ . # dots
+ / # slash (for taps)
- # dashes
'''
@@ -98,6 +156,7 @@ def valid_path(cls, path):
- a string containing only:
- alphanumeric characters
- dashes
+ - dots
- spaces
- colons
- os.path.sep
@@ -122,6 +181,7 @@ def valid_brew_path(cls, brew_path):
- a string containing only:
- alphanumeric characters
- dashes
+ - dots
- spaces
- os.path.sep
'''
@@ -170,6 +230,7 @@ def valid_module(cls, module):
'''A valid module is an instance of AnsibleModule.'''
return isinstance(module, AnsibleModule)
+
# /class validations ------------------------------------------- }}}
# class properties --------------------------------------------- {{{
@@ -251,10 +312,14 @@ def current_cask(self, cask):
return cask
# /class properties -------------------------------------------- }}}
- def __init__(self, module, path=None, casks=None, state=None):
+ def __init__(self, module, path=None, casks=None, state=None,
+ update_homebrew=False, install_options=None):
+ if not install_options:
+ install_options = list()
self._setup_status_vars()
self._setup_instance_vars(module=module, path=path, casks=casks,
- state=state)
+ state=state, update_homebrew=update_homebrew,
+ install_options=install_options,)
self._prep()
@@ -267,17 +332,12 @@ def _setup_status_vars(self):
self.message = ''
def _setup_instance_vars(self, **kwargs):
- for key, val in kwargs.iteritems():
+ for key, val in iteritems(kwargs):
setattr(self, key, val)
def _prep(self):
- self._prep_path()
self._prep_brew_path()
- def _prep_path(self):
- if not self.path:
- self.path = ['/usr/local/bin']
-
def _prep_brew_path(self):
if not self.module:
self.brew_path = None
@@ -324,8 +384,12 @@ def _current_cask_is_installed(self):
self.message = 'Invalid cask: {0}.'.format(self.current_cask)
raise HomebrewCaskException(self.message)
- cmd = [self.brew_path, 'cask', 'list']
- rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0])
+ cmd = [
+ "{brew_path}".format(brew_path=self.brew_path),
+ "cask",
+ "list"
+ ]
+ rc, out, err = self.module.run_command(cmd)
if 'nothing to list' in err:
return False
@@ -340,6 +404,9 @@ def _current_cask_is_installed(self):
# commands ----------------------------------------------------- {{{
def _run(self):
+ if self.update_homebrew:
+ self._update_homebrew()
+
if self.state == 'installed':
return self._install_casks()
elif self.state == 'absent':
@@ -353,7 +420,7 @@ def _update_homebrew(self):
rc, out, err = self.module.run_command([
self.brew_path,
'update',
- ], path_prefix=self.path[0])
+ ])
if rc == 0:
if out and isinstance(out, basestring):
already_updated = any(
@@ -395,11 +462,13 @@ def _install_current_cask(self):
)
raise HomebrewCaskException(self.message)
- cmd = [opt
- for opt in (self.brew_path, 'cask', 'install', self.current_cask)
- if opt]
+ opts = (
+ [self.brew_path, 'cask', 'install', self.current_cask]
+ + self.install_options
+ )
- rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0])
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
if self._current_cask_is_installed():
self.changed_count += 1
@@ -444,7 +513,7 @@ def _uninstall_current_cask(self):
for opt in (self.brew_path, 'cask', 'uninstall', self.current_cask)
if opt]
- rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0])
+ rc, out, err = self.module.run_command(cmd)
if not self._current_cask_is_installed():
self.changed_count += 1
@@ -469,8 +538,16 @@ def _uninstall_casks(self):
def main():
module = AnsibleModule(
argument_spec=dict(
- name=dict(aliases=["cask"], required=False),
- path=dict(required=False),
+ name=dict(
+ aliases=["pkg", "package", "cask"],
+ required=False,
+ type='list',
+ ),
+ path=dict(
+ default="/usr/local/bin",
+ required=False,
+ type='path',
+ ),
state=dict(
default="present",
choices=[
@@ -478,21 +555,32 @@ def main():
"absent", "removed", "uninstalled",
],
),
+ update_homebrew=dict(
+ default=False,
+ aliases=["update-brew"],
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ )
),
supports_check_mode=True,
)
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
p = module.params
if p['name']:
- casks = p['name'].split(',')
+ casks = p['name']
else:
casks = None
path = p['path']
if path:
path = path.split(':')
- else:
- path = ['/usr/local/bin']
state = p['state']
if state in ('present', 'installed'):
@@ -500,8 +588,14 @@ def main():
if state in ('absent', 'removed', 'uninstalled'):
state = 'absent'
+ update_homebrew = p['update_homebrew']
+ p['install_options'] = p['install_options'] or []
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
+
brew_cask = HomebrewCask(module=module, path=path, casks=casks,
- state=state)
+ state=state, update_homebrew=update_homebrew,
+ install_options=install_options)
(failed, changed, message) = brew_cask.run()
if failed:
module.fail_json(msg=message)
@@ -513,4 +607,3 @@ def main():
if __name__ == '__main__':
main()
-
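For reference, how the new install_options list becomes CLI flags in the hunk above: each bare option is prefixed with '--' and appended to the `brew cask install` call, with falsy entries filtered out:

def build_cask_install(brew_path, cask, install_options):
    opts = ['--{0}'.format(o) for o in install_options]
    return [c for c in [brew_path, 'cask', 'install', cask] + opts if c]

print(build_cask_install('/usr/local/bin/brew', 'alfred',
                         ['debug', 'appdir=/Applications']))
# ['/usr/local/bin/brew', 'cask', 'install', 'alfred', '--debug', '--appdir=/Applications']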
diff --git a/packaging/os/homebrew_tap.py b/packaging/os/homebrew_tap.py
index c6511f0c7b2..649a32f1b89 100644
--- a/packaging/os/homebrew_tap.py
+++ b/packaging/os/homebrew_tap.py
@@ -2,6 +2,8 @@
# -*- coding: utf-8 -*-
# (c) 2013, Daniel Jaouen
+# (c) 2016, Indrajit Raychaudhuri
+#
# Based on homebrew (Andrew Dunham )
#
# This file is part of Ansible
@@ -21,19 +23,36 @@
import re
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: homebrew_tap
-author: "Daniel Jaouen (@danieljaouen)"
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
short_description: Tap a Homebrew repository.
description:
- Tap external Homebrew repositories.
version_added: "1.6"
options:
- tap:
+ name:
description:
- - The repository to tap.
+ - The GitHub user/organization repository to tap.
required: true
+ aliases: ['tap']
+ url:
+ description:
+ - The optional git URL of the repository to tap. The URL is not
+ assumed to be on GitHub, and the protocol doesn't have to be HTTP.
+ Any location and protocol that git can handle is fine.
+ required: false
+ version_added: "2.2"
+ note:
+ - The I(name) option may not be a list of multiple taps (it must be
+ a single tap) when this option is provided.
state:
description:
- state of the repository.
@@ -44,9 +63,20 @@
'''
EXAMPLES = '''
-homebrew_tap: tap=homebrew/dupes state=present
-homebrew_tap: tap=homebrew/dupes state=absent
-homebrew_tap: tap=homebrew/dupes,homebrew/science state=present
+- homebrew_tap:
+ name: homebrew/dupes
+
+- homebrew_tap:
+ name: homebrew/dupes
+ state: absent
+
+- homebrew_tap:
+ name: homebrew/dupes,homebrew/science
+ state: present
+
+- homebrew_tap:
+ name: telemachus/brew
+ url: 'https://bitbucket.org/telemachus/brew'
'''
@@ -63,11 +93,14 @@ def already_tapped(module, brew_path, tap):
brew_path,
'tap',
])
+
taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_]
- return tap.lower() in taps
+ tap_name = re.sub('homebrew-', '', tap.lower())
+ return tap_name in taps
-def add_tap(module, brew_path, tap):
+
+def add_tap(module, brew_path, tap, url=None):
'''Adds a single tap.'''
failed, changed, msg = False, False, ''
@@ -83,6 +116,7 @@ def add_tap(module, brew_path, tap):
brew_path,
'tap',
tap,
+ url,
])
if already_tapped(module, brew_path, tap):
changed = True
@@ -180,7 +214,8 @@ def remove_taps(module, brew_path, taps):
def main():
module = AnsibleModule(
argument_spec=dict(
- name=dict(aliases=['tap'], required=True),
+ name=dict(aliases=['tap'], type='list', required=True),
+ url=dict(default=None, required=False),
state=dict(default='present', choices=['present', 'absent']),
),
supports_check_mode=True,
@@ -192,10 +227,22 @@ def main():
opt_dirs=['/usr/local/bin']
)
- taps = module.params['name'].split(',')
+ taps = module.params['name']
+ url = module.params['url']
if module.params['state'] == 'present':
- failed, changed, msg = add_taps(module, brew_path, taps)
+ if url is None:
+ # No tap URL provided explicitly, continue with bulk addition
+ # of all the taps.
+ failed, changed, msg = add_taps(module, brew_path, taps)
+ else:
+ # When a tap URL is provided explicitly, we allow adding a
+ # *single* tap only. Validate and proceed to add the single tap.
+ if len(taps) > 1:
+ msg = "List of muliple taps may not be provided with 'url' option."
+ module.fail_json(msg=msg)
+ else:
+ failed, changed, msg = add_tap(module, brew_path, taps[0], url)
if failed:
module.fail_json(msg=msg)
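A runnable distillation of the tap comparison tweak above: `brew tap` lists taps without the 'homebrew-' repository prefix, so the candidate name is normalized the same way before the membership test. The sample output is illustrative:

import re

def already_tapped(brew_tap_output, tap):
    taps = [t.strip().lower() for t in brew_tap_output.split('\n') if t]
    tap_name = re.sub('homebrew-', '', tap.lower())
    return tap_name in taps

print(already_tapped("homebrew/dupes\ntelemachus/brew\n",
                     "homebrew/homebrew-dupes"))  # True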
diff --git a/packaging/os/layman.py b/packaging/os/layman.py
index f9ace121201..440001b48a0 100644
--- a/packaging/os/layman.py
+++ b/packaging/os/layman.py
@@ -21,6 +21,10 @@
import shutil
from os import path
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: layman
@@ -64,19 +68,29 @@
EXAMPLES = '''
# Install the overlay 'mozilla' which is on the central overlays list.
-- layman: name=mozilla
+- layman:
+ name: mozilla
# Install the overlay 'cvut' from the specified alternative list.
-- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml
+- layman:
+ name: cvut
+ list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
# Update (sync) the overlay 'cvut', or install if not installed yet.
-- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml state=updated
+- layman:
+ name: cvut
+ list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
+ state: updated
# Update (sync) all of the installed overlays.
-- layman: name=ALL state=updated
+- layman:
+ name: ALL
+ state: updated
# Uninstall the overlay 'cvut'.
-- layman: name=cvut state=absent
+- layman:
+ name: cvut
+ state: absent
'''
USERAGENT = 'ansible-httpget'
@@ -120,7 +134,7 @@ def download_url(module, url, dest):
try:
with open(dest, 'w') as f:
shutil.copyfileobj(response, f)
- except IOError, e:
+ except IOError as e:
raise ModuleError("Failed to write: %s" % str(e))
@@ -248,7 +262,7 @@ def main():
else:
changed = uninstall_overlay(module, name)
- except ModuleError, e:
+ except ModuleError as e:
module.fail_json(msg=e.message)
else:
module.exit_json(changed=changed, name=name)
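
The exception-handling change above is the standard Python 3 migration step:
the comma form ("except IOError, e") is a syntax error on Python 3, while the
"as" form works on Python 2.6+ and 3 alike. A minimal illustration (the path
is a placeholder):

    try:
        open('/nonexistent/path')
    except IOError as e:
        # Portable on Python 2.6+ and Python 3.
        print("Failed to open: %s" % str(e))
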
diff --git a/packaging/os/macports.py b/packaging/os/macports.py
index ca3a0f97426..ac49f1568e5 100644
--- a/packaging/os/macports.py
+++ b/packaging/os/macports.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: macports
@@ -46,11 +50,26 @@
notes: []
'''
EXAMPLES = '''
-- macports: name=foo state=present
-- macports: name=foo state=present update_cache=yes
-- macports: name=foo state=absent
-- macports: name=foo state=active
-- macports: name=foo state=inactive
+- macports:
+ name: foo
+ state: present
+
+- macports:
+ name: foo
+ state: present
+ update_cache: yes
+
+- macports:
+ name: foo
+ state: absent
+
+- macports:
+ name: foo
+ state: active
+
+- macports:
+ name: foo
+ state: inactive
'''
import pipes
@@ -214,4 +233,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
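
The new entry-point guard above is what lets a module file be imported (for
example by tests or documentation tooling) without side effects; main() only
runs when the file is executed directly. A minimal illustration:

    def main():
        print("module logic runs here")

    # Executes on "python file.py", but not on "import file".
    if __name__ == '__main__':
        main()
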
diff --git a/packaging/os/openbsd_pkg.py b/packaging/os/openbsd_pkg.py
index 13cafa26bc5..7d0e9ac9459 100644
--- a/packaging/os/openbsd_pkg.py
+++ b/packaging/os/openbsd_pkg.py
@@ -18,8 +18,17 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+import os
+import platform
import re
import shlex
+import sqlite3
+
+from distutils.version import StrictVersion
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -29,6 +38,7 @@
short_description: Manage packages on OpenBSD.
description:
- Manage packages on OpenBSD using the pkg tools.
+requirements: [ "python >= 2.5" ]
options:
name:
required: true
@@ -41,26 +51,66 @@
- C(present) will make sure the package is installed.
C(latest) will make sure the latest version of the package is installed.
C(absent) will make sure the specified package is not installed.
+ build:
+ required: false
+ choices: [ yes, no ]
+ default: no
+ description:
+ - Build the package from source instead of downloading and installing
+ a binary. Requires that the port source tree is already installed.
+ Automatically builds and installs the 'sqlports' package, if it is
+ not already installed.
+ version_added: "2.1"
+ ports_dir:
+ required: false
+ default: /usr/ports
+ description:
+ - When used in combination with the 'build' option, allows overriding
+ the default ports source directory.
+ version_added: "2.1"
'''
EXAMPLES = '''
# Make sure nmap is installed
-- openbsd_pkg: name=nmap state=present
+- openbsd_pkg:
+ name: nmap
+ state: present
# Make sure nmap is the latest version
-- openbsd_pkg: name=nmap state=latest
+- openbsd_pkg:
+ name: nmap
+ state: latest
# Make sure nmap is not installed
-- openbsd_pkg: name=nmap state=absent
+- openbsd_pkg:
+ name: nmap
+ state: absent
+
+# Make sure nmap is installed, build it from source if it is not
+- openbsd_pkg:
+ name: nmap
+ state: present
+ build: yes
# Specify a pkg flavour with '--'
-- openbsd_pkg: name=vim--nox11 state=present
+- openbsd_pkg:
+ name: vim--no_x11
+ state: present
# Specify the default flavour to avoid ambiguity errors
-- openbsd_pkg: name=vim-- state=present
+- openbsd_pkg:
+ name: vim--
+ state: present
+
+# Specify a package branch (requires at least OpenBSD 6.0)
+- openbsd_pkg:
+ name: python%3.5
+ state: present
# Update all packages on the system
-- openbsd_pkg: name=* state=latest
+- openbsd_pkg:
+ name: '*'
+ state: latest
'''
# Function used for executing commands.
@@ -71,70 +121,69 @@ def execute_command(cmd, module):
cmd_args = shlex.split(cmd)
return module.run_command(cmd_args)
-# Function used for getting the name of a currently installed package.
-def get_current_name(name, pkg_spec, module):
- info_cmd = 'pkg_info'
- (rc, stdout, stderr) = execute_command("%s" % (info_cmd), module)
- if rc != 0:
- return (rc, stdout, stderr)
-
- if pkg_spec['version']:
- pattern = "^%s" % name
- elif pkg_spec['flavor']:
- pattern = "^%s-.*-%s\s" % (pkg_spec['stem'], pkg_spec['flavor'])
- else:
- pattern = "^%s-" % pkg_spec['stem']
-
- module.debug("get_current_name(): pattern = %s" % pattern)
-
- for line in stdout.splitlines():
- module.debug("get_current_name: line = %s" % line)
- match = re.search(pattern, line)
- if match:
- current_name = line.split()[0]
-
- return current_name
-
# Function used to find out if a package is currently installed.
def get_package_state(name, pkg_spec, module):
- info_cmd = 'pkg_info -e'
+ info_cmd = 'pkg_info -Iq'
- if pkg_spec['version']:
- command = "%s %s" % (info_cmd, name)
- elif pkg_spec['flavor']:
- command = "%s %s-*-%s" % (info_cmd, pkg_spec['stem'], pkg_spec['flavor'])
- else:
- command = "%s %s-*" % (info_cmd, pkg_spec['stem'])
+ command = "%s inst:%s" % (info_cmd, name)
rc, stdout, stderr = execute_command(command, module)
- if (stderr):
+ if stderr:
module.fail_json(msg="failed in get_package_state(): " + stderr)
- if rc == 0:
+ if stdout:
+ # If the requested package name is just a stem, like "python", we may
+ # find multiple packages with that name.
+ pkg_spec['installed_names'] = [name for name in stdout.splitlines()]
+ module.debug("get_package_state(): installed_names = %s" % pkg_spec['installed_names'])
return True
else:
return False
# Function used to make sure a package is present.
def package_present(name, installed_state, pkg_spec, module):
+ build = module.params['build']
+
if module.check_mode:
install_cmd = 'pkg_add -Imn'
else:
- install_cmd = 'pkg_add -Im'
+ if build is True:
+ port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
+ if os.path.isdir(port_dir):
+ if pkg_spec['flavor']:
+ flavors = pkg_spec['flavor'].replace('-', ' ')
+ install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
+ elif pkg_spec['subpackage']:
+ install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir, pkg_spec['subpackage'])
+ else:
+ install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
+ else:
+ module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
+ else:
+ install_cmd = 'pkg_add -Im'
if installed_state is False:
# Attempt to install the package
- (rc, stdout, stderr) = execute_command("%s %s" % (install_cmd, name), module)
+ if build is True and not module.check_mode:
+ (rc, stdout, stderr) = module.run_command(install_cmd, use_unsafe_shell=True)
+ else:
+ (rc, stdout, stderr) = execute_command("%s %s" % (install_cmd, name), module)
# The behaviour of pkg_add is a bit different depending on if a
# specific version is supplied or not.
#
# When a specific version is supplied the return code will be 0 when
- # a package is found and 1 when it is not, if a version is not
- # supplied the tool will exit 0 in both cases:
- if pkg_spec['version']:
+ # a package is found and 1 when it is not. If a version is not
+ # supplied the tool will exit 0 in both cases.
+ #
+ # It is important to note that "version" relates to the
+ # packages-specs(7) notion of a version. If using the branch syntax
+ # (like "python%3.5") the version number is considered part of the
+ # stem, and pkg_add behaves the same as if the name did
+ # not contain a version (which it strictly speaking does not).
+ if pkg_spec['version'] or build is True:
# Depend on the return code.
module.debug("package_present(): depending on return code")
if rc:
@@ -148,11 +197,14 @@ def package_present(name, installed_state, pkg_spec, module):
# "file:/local/package/directory/ is empty" message on stderr
# while still installing the package, so we need to look
# for a message like "packagename-1.0: ok" just in case.
- match = re.search("\W%s-[^:]+: ok\W" % name, stdout)
+ if pkg_spec['style'] == 'branch':
+ match = re.search("\W%s-[^:]+: ok\W" % pkg_spec['pkgname'], stdout)
+ else:
+ match = re.search("\W%s-[^:]+: ok\W" % name, stdout)
+
if match:
# It turns out we were able to install the package.
- module.debug("package_present(): we were able to install package")
- pass
+ module.debug("package_present(): we were able to install the package")
else:
# We really did fail, fake the return code.
module.debug("package_present(): we really did fail")
@@ -177,6 +229,10 @@ def package_present(name, installed_state, pkg_spec, module):
# Function used to make sure a package is the latest available version.
def package_latest(name, installed_state, pkg_spec, module):
+
+ if module.params['build'] is True:
+ module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build'])
+
if module.check_mode:
upgrade_cmd = 'pkg_add -umn'
else:
@@ -186,25 +242,23 @@ def package_latest(name, installed_state, pkg_spec, module):
if installed_state is True:
- # Fetch name of currently installed package.
- pre_upgrade_name = get_current_name(name, pkg_spec, module)
-
- module.debug("package_latest(): pre_upgrade_name = %s" % pre_upgrade_name)
-
# Attempt to upgrade the package.
(rc, stdout, stderr) = execute_command("%s %s" % (upgrade_cmd, name), module)
# Look for output looking something like "nmap-6.01->6.25: ok" to see if
# something changed (or would have changed). Use \W to delimit the match
# from progress meter output.
- match = re.search("\W%s->.+: ok\W" % pre_upgrade_name, stdout)
- if match:
- if module.check_mode:
- module.exit_json(changed=True)
-
- changed = True
- else:
- changed = False
+ changed = False
+ for installed_name in pkg_spec['installed_names']:
+ module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
+ match = re.search("\W%s->.+: ok\W" % installed_name, stdout)
+ if match:
+ module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ changed = True
+ break
# FIXME: This part is problematic. Based on the issues mentioned (and
# handled) in package_present() it is not safe to blindly trust stderr
@@ -256,7 +310,12 @@ def package_absent(name, installed_state, module):
# Function used to parse the package name based on packages-specs(7).
# The general name structure is "stem-version[-flavors]".
+#
+# Names containing "%" are a special variation not part of the
+# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
+# description.
def parse_package_name(name, pkg_spec, module):
+ module.debug("parse_package_name(): parsing name: %s" % name)
# Do some initial matches so we can base the more advanced regex on that.
version_match = re.search("-[0-9]", name)
versionless_match = re.search("--", name)
@@ -264,7 +323,7 @@ def parse_package_name(name, pkg_spec, module):
# Stop if someone is giving us a name that both has a version and is
# version-less at the same time.
if version_match and versionless_match:
- module.fail_json(msg="Package name both has a version and is version-less: " + name)
+ module.fail_json(msg="package name both has a version and is version-less: " + name)
# If name includes a version.
if version_match:
@@ -275,8 +334,9 @@ def parse_package_name(name, pkg_spec, module):
pkg_spec['version'] = match.group('version')
pkg_spec['flavor_separator'] = match.group('flavor_separator')
pkg_spec['flavor'] = match.group('flavor')
+ pkg_spec['style'] = 'version'
else:
- module.fail_json(msg="Unable to parse package name at version_match: " + name)
+ module.fail_json(msg="unable to parse package name at version_match: " + name)
# If name includes no version but is version-less ("--").
elif versionless_match:
@@ -287,8 +347,9 @@ def parse_package_name(name, pkg_spec, module):
pkg_spec['version'] = None
pkg_spec['flavor_separator'] = '-'
pkg_spec['flavor'] = match.group('flavor')
+ pkg_spec['style'] = 'versionless'
else:
- module.fail_json(msg="Unable to parse package name at versionless_match: " + name)
+ module.fail_json(msg="unable to parse package name at versionless_match: " + name)
# If name includes no version, and is not version-less, it is all a stem.
else:
@@ -299,15 +360,83 @@ def parse_package_name(name, pkg_spec, module):
pkg_spec['version'] = None
pkg_spec['flavor_separator'] = None
pkg_spec['flavor'] = None
+ pkg_spec['style'] = 'stem'
else:
- module.fail_json(msg="Unable to parse package name at else: " + name)
+ module.fail_json(msg="unable to parse package name at else: " + name)
+
+ # If the stem contains an "%" then it needs special treatment.
+ branch_match = re.search("%", pkg_spec['stem'])
+ if branch_match:
+
+ branch_release = "6.0"
+
+ if version_match or versionless_match:
+ module.fail_json(msg="package name using 'branch' syntax also has a version or is version-less: " + name)
+ if StrictVersion(platform.release()) < StrictVersion(branch_release):
+ module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
+
+ pkg_spec['style'] = 'branch'
+
+ # Key names from description in pkg_add(1).
+ pkg_spec['pkgname'] = pkg_spec['stem'].split('%')[0]
+ pkg_spec['branch'] = pkg_spec['stem'].split('%')[1]
# Sanity check that there are no trailing dashes in flavor.
# Try to stop strange stuff early so we can be strict later.
if pkg_spec['flavor']:
match = re.search("-$", pkg_spec['flavor'])
if match:
- module.fail_json(msg="Trailing dash in flavor: " + pkg_spec['flavor'])
+ module.fail_json(msg="trailing dash in flavor: " + pkg_spec['flavor'])
+
+# Function used for figuring out the port path.
+def get_package_source_path(name, pkg_spec, module):
+ pkg_spec['subpackage'] = None
+ if pkg_spec['stem'] == 'sqlports':
+ return 'databases/sqlports'
+ else:
+ # try for an exact match first
+ sqlports_db_file = '/usr/local/share/sqlports'
+ if not os.path.isfile(sqlports_db_file):
+ module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
+
+ conn = sqlite3.connect(sqlports_db_file)
+ first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
+ query = first_part_of_query + ' = ?'
+ module.debug("package_package_source_path(): exact query: %s" % query)
+ cursor = conn.execute(query, (name,))
+ results = cursor.fetchall()
+
+ # next, try for a fuzzier match
+ if len(results) < 1:
+ looking_for = pkg_spec['stem'] + (pkg_spec['version_separator'] or '-') + (pkg_spec['version'] or '%')
+ query = first_part_of_query + ' LIKE ?'
+ if pkg_spec['flavor']:
+ looking_for += pkg_spec['flavor_separator'] + pkg_spec['flavor']
+ module.debug("package_package_source_path(): fuzzy flavor query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ elif pkg_spec['style'] == 'versionless':
+ query += ' AND fullpkgname NOT LIKE ?'
+ module.debug("package_package_source_path(): fuzzy versionless query: %s" % query)
+ cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
+ else:
+ module.debug("package_package_source_path(): fuzzy query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ results = cursor.fetchall()
+
+ # error if we don't find exactly 1 match
+ conn.close()
+ if len(results) < 1:
+ module.fail_json(msg="could not find a port by the name '%s'" % name)
+ if len(results) > 1:
+ matches = map(lambda x:x[1], results)
+ module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))
+
+ # there's exactly 1 match, so figure out the subpackage, if any, then return
+ fullpkgpath = results[0][0]
+ parts = fullpkgpath.split(',')
+ if len(parts) > 1 and parts[1][0] == '-':
+ pkg_spec['subpackage'] = parts[1]
+ return parts[0]
# Function used for upgrading all installed packages.
def upgrade_packages(module):
@@ -348,12 +477,16 @@ def main():
argument_spec = dict(
name = dict(required=True),
state = dict(required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ build = dict(default='no', type='bool'),
+ ports_dir = dict(default='/usr/ports'),
),
supports_check_mode = True
)
name = module.params['name']
state = module.params['state']
+ build = module.params['build']
+ ports_dir = module.params['ports_dir']
rc = 0
stdout = ''
@@ -361,6 +494,19 @@ def main():
result = {}
result['name'] = name
result['state'] = state
+ result['build'] = build
+
+ if build is True:
+ if not os.path.isdir(ports_dir):
+ module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
+
+ # build sqlports if it's not installed yet
+ pkg_spec = {}
+ parse_package_name('sqlports', pkg_spec, module)
+ installed_state = get_package_state('sqlports', pkg_spec, module)
+ if not installed_state:
+ module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
+ package_present('sqlports', installed_state, pkg_spec, module)
if name == '*':
if state != 'latest':
@@ -373,6 +519,11 @@ def main():
pkg_spec = {}
parse_package_name(name, pkg_spec, module)
+ # Not sure how the branch syntax is supposed to play together
+ # with build mode. Disable it for now.
+ if pkg_spec['style'] == 'branch' and module.params['build'] is True:
+ module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], name))
+
# Get package state.
installed_state = get_package_state(name, pkg_spec, module)
@@ -396,4 +547,6 @@ def main():
# Import module snippets.
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
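
A standalone sketch of the branch ("%") parsing and the release gate added
above, using a literal in place of the module's platform.release() call (the
package name is illustrative):

    from distutils.version import StrictVersion

    release = '6.0'   # stand-in for platform.release() on the target host
    name = 'python%3.5'

    # Branch syntax is only accepted on OpenBSD 6.0 or later.
    if StrictVersion(release) >= StrictVersion('6.0'):
        pkgname, branch = name.split('%')
        assert (pkgname, branch) == ('python', '3.5')
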
diff --git a/packaging/os/opkg.py b/packaging/os/opkg.py
index 9ac8f99b8c8..6360f45af33 100644
--- a/packaging/os/opkg.py
+++ b/packaging/os/opkg.py
@@ -17,6 +17,10 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: opkg
@@ -52,11 +56,27 @@
notes: []
'''
EXAMPLES = '''
-- opkg: name=foo state=present
-- opkg: name=foo state=present update_cache=yes
-- opkg: name=foo state=absent
-- opkg: name=foo,bar state=absent
-- opkg: name=foo state=present force=overwrite
+- opkg:
+ name: foo
+ state: present
+
+- opkg:
+ name: foo
+ state: present
+ update_cache: yes
+
+- opkg:
+ name: foo
+ state: absent
+
+- opkg:
+ name: foo,bar
+ state: absent
+
+- opkg:
+ name: foo
+ state: present
+ force: overwrite
'''
import pipes
@@ -166,4 +186,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/packaging/os/pacman.py b/packaging/os/pacman.py
index da85a6c0a1f..89766a49745 100644
--- a/packaging/os/pacman.py
+++ b/packaging/os/pacman.py
@@ -20,6 +20,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: pacman
@@ -40,6 +44,7 @@
- Name of the package to install, upgrade, or remove.
required: false
default: null
+ aliases: [ 'pkg', 'package' ]
state:
description:
@@ -60,7 +65,9 @@
force:
description:
- - Force remove package, without any checks.
+ - When removing package - force remove package, without any
+ checks. When update_cache - force redownload repo
+ databases.
required: false
default: no
choices: ["yes", "no"]
@@ -73,6 +80,7 @@
required: false
default: no
choices: ["yes", "no"]
+ aliases: [ 'update-cache' ]
upgrade:
description:
@@ -85,31 +93,47 @@
EXAMPLES = '''
# Install package foo
-- pacman: name=foo state=present
+- pacman:
+ name: foo
+ state: present
# Upgrade package foo
-- pacman: name=foo state=latest update_cache=yes
+- pacman:
+ name: foo
+ state: latest
+ update_cache: yes
# Remove packages foo and bar
-- pacman: name=foo,bar state=absent
+- pacman:
+ name: foo,bar
+ state: absent
# Recursively remove package baz
-- pacman: name=baz state=absent recurse=yes
+- pacman:
+ name: baz
+ state: absent
+ recurse: yes
# Run the equivalent of "pacman -Sy" as a separate step
-- pacman: update_cache=yes
+- pacman:
+ update_cache: yes
# Run the equivalent of "pacman -Su" as a separate step
-- pacman: upgrade=yes
+- pacman:
+ upgrade: yes
# Run the equivalent of "pacman -Syu" as a separate step
-- pacman: update_cache=yes upgrade=yes
+- pacman:
+ update_cache: yes
+ upgrade: yes
# Run the equivalent of "pacman -Rdd", force remove package baz
-- pacman: name=baz state=absent force=yes
+- pacman:
+ name: baz
+ state: absent
+ force: yes
'''
-import json
import shlex
import os
import re
@@ -124,13 +148,13 @@ def get_version(pacman_output):
return None
def query_package(module, pacman_path, name, state="present"):
- """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, and a second boolean to indicate if the package is up-to-date."""
+ """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second boolean to indicate if the package is up-to-date and a third boolean to indicate whether online information were available"""
if state == "present":
lcmd = "%s -Qi %s" % (pacman_path, name)
lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
if lrc != 0:
# package is not installed locally
- return False, False
+ return False, False, False
# get the version installed locally (if any)
lversion = get_version(lstdout)
@@ -143,13 +167,19 @@ def query_package(module, pacman_path, name, state="present"):
if rrc == 0:
# Return True to indicate that the package is installed locally, and the result of the version number comparison
# to determine if the package is up-to-date.
- return True, (lversion == rversion)
+ return True, (lversion == rversion), False
- return False, False
+ # The package is installed, but the remote version could not be fetched; the last True flags the error.
+ return True, True, True
def update_package_db(module, pacman_path):
- cmd = "%s -Sy" % (pacman_path)
+ if module.params["force"]:
+ args = "Syy"
+ else:
+ args = "Sy"
+
+ cmd = "%s -%s" % (pacman_path, args)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
@@ -165,7 +195,7 @@ def upgrade(module, pacman_path):
if rc == 0:
if module.check_mode:
data = stdout.split('\n')
- module.exit_json(changed=True, msg="%s package(s) would be upgraded" % len(data))
+ module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data) - 1))
rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
if rc == 0:
module.exit_json(changed=True, msg='System upgraded')
@@ -175,14 +205,13 @@ def upgrade(module, pacman_path):
module.exit_json(changed=False, msg='Nothing to upgrade')
def remove_packages(module, pacman_path, packages):
- if module.params["recurse"]:
- args = "Rs"
- else:
- args = "R"
-
-def remove_packages(module, pacman_path, packages):
- if module.params["force"]:
- args = "Rdd"
+ if module.params["recurse"] or module.params["force"]:
+ if module.params["recurse"]:
+ args = "Rs"
+ if module.params["force"]:
+ args = "Rdd"
+ if module.params["recurse"] and module.params["force"]:
+ args = "Rdds"
else:
args = "R"
@@ -190,7 +219,7 @@ def remove_packages(module, pacman_path, packages):
# Using a for loop incase of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
- installed, updated = query_package(module, pacman_path, package)
+ installed, updated, unknown = query_package(module, pacman_path, package)
if not installed:
continue
@@ -211,10 +240,15 @@ def remove_packages(module, pacman_path, packages):
def install_packages(module, pacman_path, state, packages, package_files):
install_c = 0
+ package_err = []
+ message = ""
for i, package in enumerate(packages):
# if the package is installed and state == present or state == latest and is up-to-date then skip
- installed, updated = query_package(module, pacman_path, package)
+ installed, updated, latestError = query_package(module, pacman_path, package)
+ if latestError and state == 'latest':
+ package_err.append(package)
+
if installed and (state == 'present' or (state == 'latest' and updated)):
continue
@@ -223,7 +257,7 @@ def install_packages(module, pacman_path, state, packages, package_files):
else:
params = '-S %s' % package
- cmd = "%s %s --noconfirm" % (pacman_path, params)
+ cmd = "%s %s --noconfirm --needed" % (pacman_path, params)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
@@ -231,16 +265,18 @@ def install_packages(module, pacman_path, state, packages, package_files):
install_c += 1
- if install_c > 0:
- module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+ if state == 'latest' and len(package_err) > 0:
+ message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (package_err)
- module.exit_json(changed=False, msg="package(s) already installed")
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message))
+ module.exit_json(changed=False, msg="package(s) already installed. %s" % (message))
def check_packages(module, pacman_path, packages, state):
would_be_changed = []
for package in packages:
- installed, updated = query_package(module, pacman_path, package)
+ installed, updated, unknown = query_package(module, pacman_path, package)
if ((state in ["present", "latest"] and not installed) or
(state == "absent" and installed) or
(state == "latest" and not updated)):
@@ -254,6 +290,25 @@ def check_packages(module, pacman_path, packages, state):
module.exit_json(changed=False, msg="package(s) already %s" % state)
+def expand_package_groups(module, pacman_path, pkgs):
+ expanded = []
+
+ for pkg in pkgs:
+ cmd = "%s -Sgq %s" % (pacman_path, pkg)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc == 0:
+ # A group was found matching the name, so expand it
+ for name in stdout.split('\n'):
+ name = name.strip()
+ if name:
+ expanded.append(name)
+ else:
+ expanded.append(pkg)
+
+ return expanded
+
+
def main():
module = AnsibleModule(
argument_spec = dict(
@@ -269,9 +324,6 @@ def main():
pacman_path = module.get_bin_path('pacman', True)
- if not os.path.exists(pacman_path):
- module.fail_json(msg="cannot find pacman, in path %s" % (pacman_path))
-
p = module.params
# normalize the state parameter
@@ -292,7 +344,7 @@ def main():
upgrade(module, pacman_path)
if p['name']:
- pkgs = p['name']
+ pkgs = expand_package_groups(module, pacman_path, p['name'])
pkg_files = []
for i, pkg in enumerate(pkgs):
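
For context, a rough standalone sketch of the group expansion introduced
above, with subprocess standing in for module.run_command (error handling
elided; this assumes a host with pacman available):

    import subprocess

    def expand_package_groups(pkgs):
        expanded = []
        for pkg in pkgs:
            # 'pacman -Sgq NAME' exits 0 and prints the member packages
            # when NAME is a group; otherwise keep the literal name.
            proc = subprocess.run(['pacman', '-Sgq', pkg],
                                  capture_output=True, text=True)
            if proc.returncode == 0:
                expanded.extend(line.strip()
                                for line in proc.stdout.splitlines()
                                if line.strip())
            else:
                expanded.append(pkg)
        return expanded
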
diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py
index 837eefd243e..4c02d63821a 100644
--- a/packaging/os/pkg5.py
+++ b/packaging/os/pkg5.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: pkg5
@@ -49,10 +53,13 @@
'''
EXAMPLES = '''
# Install Vim:
-- pkg5: name=editor/vim
+- pkg5:
+ name: editor/vim
# Remove finger daemon:
-- pkg5: name=service/network/finger state=absent
+- pkg5:
+ name: service/network/finger
+ state: absent
# Install several packages at once:
- pkg5:
@@ -78,7 +85,7 @@ def main():
]
),
accept_licenses=dict(
- choices=BOOLEANS,
+ type='bool',
default=False,
aliases=['accept_licences', 'accept'],
),
@@ -165,4 +172,6 @@ def is_latest(module, package):
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
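
The switch from choices=BOOLEANS to type='bool' above delegates truthy-string
coercion to Ansible's argument parsing. A simplified stand-in for that
coercion (the accepted spellings here are the common ones, not an exhaustive
copy of Ansible's list):

    def to_bool(value):
        truthy = ('y', 'yes', 'on', '1', 'true', 1, True)
        falsy = ('n', 'no', 'off', '0', 'false', 0, False)
        if isinstance(value, str):
            value = value.lower()
        if value in truthy:
            return True
        if value in falsy:
            return False
        raise ValueError('not a boolean: %r' % value)

    assert to_bool('Yes') is True and to_bool('no') is False
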
diff --git a/packaging/os/pkg5_publisher.py b/packaging/os/pkg5_publisher.py
index 3881f5dd0b8..279b40f0090 100644
--- a/packaging/os/pkg5_publisher.py
+++ b/packaging/os/pkg5_publisher.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: pkg5_publisher
@@ -66,10 +70,15 @@
'''
EXAMPLES = '''
# Fetch packages for the solaris publisher direct from Oracle:
-- pkg5_publisher: name=solaris sticky=true origin=https://pkg.oracle.com/solaris/support/
+- pkg5_publisher:
+ name: solaris
+ sticky: true
+ origin: https://pkg.oracle.com/solaris/support/
# Configure a publisher for locally-produced packages:
-- pkg5_publisher: name=site origin=https://pkg.example.com/site/
+- pkg5_publisher:
+ name: site
+ origin: 'https://pkg.example.com/site/'
'''
def main():
@@ -77,8 +86,8 @@ def main():
argument_spec=dict(
name=dict(required=True, aliases=['publisher']),
state=dict(default='present', choices=['present', 'absent']),
- sticky=dict(choices=BOOLEANS),
- enabled=dict(choices=BOOLEANS),
+ sticky=dict(type='bool'),
+ enabled=dict(type='bool'),
# search_after=dict(),
# search_before=dict(),
origin=dict(type='list'),
@@ -180,13 +189,14 @@ def get_publishers(module):
publishers[name]['origin'] = []
publishers[name]['mirror'] = []
- publishers[name][values['type']].append(values['uri'])
+ if values['type'] is not None:
+ publishers[name][values['type']].append(values['uri'])
return publishers
def unstringify(val):
- if val == "-":
+ if val == "-" or val == '':
return None
elif val == "true":
return True
@@ -197,4 +207,6 @@ def unstringify(val):
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
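
A sketch reconstructing the patched unstringify() for reference; the final
pass-through branch is assumed from context, since the hunk above only shows
the None/True cases:

    def unstringify(val):
        # pkg publisher output uses '-' (and occasionally an empty
        # field) for unset values, and 'true'/'false' for booleans.
        if val == '-' or val == '':
            return None
        elif val == 'true':
            return True
        elif val == 'false':
            return False
        # Assumed: anything else passes through unchanged.
        return val

    assert unstringify('-') is None and unstringify('') is None
    assert unstringify('true') is True
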
diff --git a/packaging/os/pkgin.py b/packaging/os/pkgin.py
old mode 100644
new mode 100755
index e600026409b..8e75f2d18ce
--- a/packaging/os/pkgin.py
+++ b/packaging/os/pkgin.py
@@ -3,6 +3,7 @@
# Copyright (c) 2013 Shaun Zinck
# Copyright (c) 2015 Lawrence Leonard Gilbert
+# Copyright (c) 2016 Jasper Lievisse Adriaanse
#
# Written by Shaun Zinck
# Based on pacman module written by Afterburn
@@ -22,6 +23,10 @@
# along with this software. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: pkgin
@@ -33,6 +38,7 @@
author:
- "Larry Gilbert (L2G)"
- "Shaun Zinck (@szinck)"
+ - "Jasper Lievisse Adriaanse (@jasperla)"
notes:
- "Known bug with pkgin < 0.8.0: if a package is removed and another
package depends on it, the other package will be silently removed as
@@ -42,34 +48,98 @@
description:
- Name of package to install/remove;
- multiple names may be given, separated by commas
- required: true
+ required: false
+ default: null
state:
description:
- Intended state of the package
choices: [ 'present', 'absent' ]
required: false
default: present
+ update_cache:
+ description:
+ - Update repository database. Can be run with other steps or on its own.
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ version_added: "2.1"
+ upgrade:
+ description:
+ - Upgrade main packages to their newer versions
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ version_added: "2.1"
+ full_upgrade:
+ description:
+ - Upgrade all packages to their newer versions
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ version_added: "2.1"
+ clean:
+ description:
+ - Clean packages cache
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ version_added: "2.1"
+ force:
+ description:
+ - Force package reinstall
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ version_added: "2.1"
'''
EXAMPLES = '''
# install package foo
-- pkgin: name=foo state=present
+- pkgin:
+ name: foo
+ state: present
+
+# Update database and install "foo" package
+- pkgin:
+ name: foo
+ update_cache: yes
# remove package foo
-- pkgin: name=foo state=absent
+- pkgin:
+ name: foo
+ state: absent
# remove packages foo and bar
-- pkgin: name=foo,bar state=absent
+- pkgin:
+ name: foo,bar
+ state: absent
+
+# Update repositories as a separate step
+- pkgin:
+ update_cache: yes
+
+# Upgrade main packages (equivalent to C(pkgin upgrade))
+- pkgin:
+ upgrade: yes
+
+# Upgrade all packages (equivalent to C(pkgin full-upgrade))
+- pkgin:
+ full_upgrade: yes
+
+# Force-upgrade all packages (equivalent to C(pkgin -F full-upgrade))
+- pkgin:
+ full_upgrade: yes
+ force: yes
+
+# clean packages cache (equivalent to C(pkgin clean))
+- pkgin:
+ clean: yes
'''
-import json
-import shlex
-import os
-import sys
-import pipes
+import re
-def query_package(module, pkgin_path, name):
+def query_package(module, name):
"""Search for the package by name.
Possible return values:
@@ -79,7 +149,7 @@ def query_package(module, pkgin_path, name):
"""
# test whether '-p' (parsable) flag is supported.
- rc, out, err = module.run_command("%s -p -v" % pkgin_path)
+ rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH)
if rc == 0:
pflag = '-p'
@@ -90,38 +160,51 @@ def query_package(module, pkgin_path, name):
# Use "pkgin search" to find the package. The regular expression will
# only match on the complete name.
- rc, out, err = module.run_command("%s %s search \"^%s$\"" % (pkgin_path, pflag, name))
+ rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name))
# rc will not be 0 unless the search was a success
if rc == 0:
- # Get first line
- line = out.split('\n')[0]
-
- # Break up line at spaces. The first part will be the package with its
- # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
- # of the package:
- # '' - not installed
- # '<' - installed but out of date
- # '=' - installed and up to date
- # '>' - installed but newer than the repository version
- pkgname_with_version, raw_state = out.split(splitchar)[0:2]
-
- # Strip version
- # (results in sth like 'gcc47-libs')
- pkgname_without_version = '-'.join(pkgname_with_version.split('-')[:-1])
-
- if name != pkgname_without_version:
- return False
- # no fall-through
-
- # The package was found; now return its state
- if raw_state == '<':
- return 'outdated'
- elif raw_state == '=' or raw_state == '>':
- return 'present'
- else:
- return False
+ # Search results may contain more than one line (e.g., 'emacs'), so iterate
+ # through each line to see if we have a match.
+ packages = out.split('\n')
+
+ for package in packages:
+
+ # Break up line at spaces. The first part will be the package with its
+ # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
+ # of the package:
+ # '' - not installed
+ # '<' - installed but out of date
+ # '=' - installed and up to date
+ # '>' - installed but newer than the repository version
+ pkgname_with_version, raw_state = package.split(splitchar)[0:2]
+
+ # Search for the package, stripping the version
+ # (results in something like 'gcc47-libs' or 'emacs24-nox11')
+ pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M)
+
+ # Do not proceed unless we have a match
+ if not pkg_search_obj:
+ continue
+
+ # Grab matched string
+ pkgname_without_version = pkg_search_obj.group(1)
+
+ if name != pkgname_without_version:
+ continue
+
+ # The package was found; now return its state
+ if raw_state == '<':
+ return 'outdated'
+ elif raw_state == '=' or raw_state == '>':
+ return 'present'
+ else:
+ return False
+ # no fall-through
+
+ # No packages were matched, so return False
+ return False
def format_action_message(module, action, count):
@@ -139,31 +222,43 @@ def format_action_message(module, action, count):
return message + "s"
-def format_pkgin_command(module, pkgin_path, command, package):
- vars = { "pkgin": pkgin_path,
+def format_pkgin_command(module, command, package=None):
+ # Not all commands take a package argument, so handle this by passing
+ # an empty string. Some commands (e.g. 'update') will ignore extra
+ # arguments, but this behaviour cannot be relied on for others.
+ if package is None:
+ package = ""
+
+ if module.params["force"]:
+ force = "-F"
+ else:
+ force = ""
+
+ vars = { "pkgin": PKGIN_PATH,
"command": command,
- "package": package }
+ "package": package,
+ "force": force}
if module.check_mode:
return "%(pkgin)s -n %(command)s %(package)s" % vars
else:
- return "%(pkgin)s -y %(command)s %(package)s" % vars
+ return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars
-def remove_packages(module, pkgin_path, packages):
+def remove_packages(module, packages):
remove_c = 0
# Using a for loop incase of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
- if not query_package(module, pkgin_path, package):
+ if not query_package(module, package):
continue
rc, out, err = module.run_command(
- format_pkgin_command(module, pkgin_path, "remove", package))
+ format_pkgin_command(module, "remove", package))
- if not module.check_mode and query_package(module, pkgin_path, package):
+ if not module.check_mode and query_package(module, package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
@@ -174,18 +269,18 @@ def remove_packages(module, pkgin_path, packages):
module.exit_json(changed=False, msg="package(s) already absent")
-def install_packages(module, pkgin_path, packages):
+def install_packages(module, packages):
install_c = 0
for package in packages:
- if query_package(module, pkgin_path, package):
+ if query_package(module, package):
continue
rc, out, err = module.run_command(
- format_pkgin_command(module, pkgin_path, "install", package))
+ format_pkgin_command(module, "install", package))
- if not module.check_mode and not query_package(module, pkgin_path, package):
+ if not module.check_mode and not query_package(module, package):
module.fail_json(msg="failed to install %s: %s" % (package, out))
install_c += 1
@@ -195,28 +290,100 @@ def install_packages(module, pkgin_path, packages):
module.exit_json(changed=False, msg="package(s) already present")
+def update_package_db(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "update"))
+
+ if rc == 0:
+ if re.search('database for.*is up-to-date\n$', out):
+ return False, "datebase is up-to-date"
+ else:
+ return True, "updated repository database"
+ else:
+ module.fail_json(msg="could not update package db")
+
+def do_upgrade_packages(module, full=False):
+ if full:
+ cmd = "full-upgrade"
+ else:
+ cmd = "upgrade"
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, cmd))
+
+ if rc == 0:
+ if re.search('^nothing to do\.\n$', out):
+ module.exit_json(changed=False, msg="nothing left to upgrade")
+ else:
+ module.fail_json(msg="could not %s packages" % cmd)
+
+def upgrade_packages(module):
+ do_upgrade_packages(module)
+
+def full_upgrade_packages(module):
+ do_upgrade_packages(module, True)
+def clean_cache(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "clean"))
+
+ if rc == 0:
+ # There's no indication if 'clean' actually removed anything,
+ # so assume it did.
+ module.exit_json(changed=True, msg="cleaned caches")
+ else:
+ module.fail_json(msg="could not clean package cache")
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default="present", choices=["present","absent"]),
- name = dict(aliases=["pkg"], required=True)),
+ name = dict(aliases=["pkg"], type='list'),
+ update_cache = dict(default='no', type='bool'),
+ upgrade = dict(default='no', type='bool'),
+ full_upgrade = dict(default='no', type='bool'),
+ clean = dict(default='no', type='bool'),
+ force = dict(default='no', type='bool')),
+ required_one_of = [['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
supports_check_mode = True)
- pkgin_path = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
+ global PKGIN_PATH
+ PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
p = module.params
- pkgs = p["name"].split(",")
+ if p["update_cache"]:
+ c, msg = update_package_db(module)
+ if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
+ module.exit_json(changed=c, msg=msg)
+
+ if p["upgrade"]:
+ upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded packages')
+
+ if p["full_upgrade"]:
+ full_upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded all packages')
+
+ if p["clean"]:
+ clean_cache(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='cleaned caches')
+
+ pkgs = p["name"]
if p["state"] == "present":
- install_packages(module, pkgin_path, pkgs)
+ install_packages(module, pkgs)
elif p["state"] == "absent":
- remove_packages(module, pkgin_path, pkgs)
+ remove_packages(module, pkgs)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
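
A quick standalone check of the version-stripping regex used by the rewritten
query_package(); the sample package names mirror the comments above:

    import re

    for pkg in ('gcc47-libs-4.7.2nb4', 'emacs24-nox11-24.5'):
        match = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkg)
        # group(1) is the name with the version (and any nbN revision
        # suffix) stripped: 'gcc47-libs', then 'emacs24-nox11'.
        print(match.group(1))
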
diff --git a/packaging/os/pkgng.py b/packaging/os/pkgng.py
index fe0f2687b31..5727b190031 100644
--- a/packaging/os/pkgng.py
+++ b/packaging/os/pkgng.py
@@ -21,6 +21,10 @@
# along with this software. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: pkgng
@@ -32,42 +36,56 @@
options:
name:
description:
- - name of package to install/remove
+ - Name of package to install/remove.
required: true
state:
description:
- - state of the package
+ - State of the package.
choices: [ 'present', 'absent' ]
required: false
default: present
cached:
description:
- - use local package base or try to fetch an updated one
+ - Use local package base instead of fetching an updated one.
choices: [ 'yes', 'no' ]
required: false
default: no
annotation:
description:
- - a comma-separated list of keyvalue-pairs of the form
- <+/-/:><name>[=<value>]. A '+' denotes adding an annotation, a
- '-' denotes removing an annotation, and ':' denotes modifying an
+ - A comma-separated list of keyvalue-pairs of the form
+ C(<+/-/:><name>[=<value>]). A C(+) denotes adding an annotation, a
+ C(-) denotes removing an annotation, and C(:) denotes modifying an
annotation.
If setting or modifying annotations, a value must be provided.
required: false
version_added: "1.6"
pkgsite:
description:
- - for pkgng versions before 1.1.4, specify packagesite to use
- for downloading packages, if not specified, use settings from
- /usr/local/etc/pkg.conf
- for newer pkgng versions, specify a the name of a repository
- configured in /usr/local/etc/pkg/repos
+ - For pkgng versions before 1.1.4, specify packagesite to use
+ for downloading packages. If not specified, use settings from
+ C(/usr/local/etc/pkg.conf).
+ - For newer pkgng versions, specify the name of a repository
+ configured in C(/usr/local/etc/pkg/repos).
required: false
rootdir:
description:
- - for pkgng versions 1.5 and later, pkg will install all packages
- within the specified root directory
+ - For pkgng versions 1.5 and later, pkg will install all packages
+ within the specified root directory.
+ - Cannot be used together with the I(chroot) option.
+ required: false
+ chroot:
+ version_added: "2.1"
+ description:
+ - Pkg will chroot in the specified environment.
+ - Cannot be used together with the I(rootdir) option.
required: false
+ autoremove:
+ version_added: "2.2"
+ description:
+ - Remove automatically installed packages which are no longer needed.
+ required: false
+ choices: [ "yes", "no" ]
+ default: no
author: "bleader (@bleader)"
notes:
- When using pkgsite, be careful that packages already in the cache won't be downloaded again.
@@ -75,25 +93,28 @@
EXAMPLES = '''
# Install package foo
-- pkgng: name=foo state=present
+- pkgng:
+ name: foo
+ state: present
# Annotate package foo and bar
-- pkgng: name=foo,bar annotation=+test1=baz,-test2,:test3=foobar
+- pkgng:
+ name: foo,bar
+ annotation: '+test1=baz,-test2,:test3=foobar'
# Remove packages foo and bar
-- pkgng: name=foo,bar state=absent
+- pkgng:
+ name: foo,bar
+ state: absent
'''
-import json
-import shlex
-import os
import re
-import sys
+from ansible.module_utils.basic import AnsibleModule
-def query_package(module, pkgng_path, name, rootdir_arg):
+def query_package(module, pkgng_path, name, dir_arg):
- rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, rootdir_arg, name))
+ rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
if rc == 0:
return True
@@ -103,7 +124,7 @@ def query_package(module, pkgng_path, name, rootdir_arg):
def pkgng_older_than(module, pkgng_path, compare_version):
rc, out, err = module.run_command("%s -v" % pkgng_path)
- version = map(lambda x: int(x), re.split(r'[\._]', out))
+ version = [int(x) for x in re.split(r'[\._]', out)]
i = 0
new_pkgng = True
@@ -117,21 +138,21 @@ def pkgng_older_than(module, pkgng_path, compare_version):
return not new_pkgng
-def remove_packages(module, pkgng_path, packages, rootdir_arg):
-
+def remove_packages(module, pkgng_path, packages, dir_arg):
+
remove_c = 0
# Using a for loop incase of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
- if not query_package(module, pkgng_path, package, rootdir_arg):
+ if not query_package(module, pkgng_path, package, dir_arg):
continue
if not module.check_mode:
- rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, rootdir_arg, package))
+ rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package))
- if not module.check_mode and query_package(module, pkgng_path, package, rootdir_arg):
+ if not module.check_mode and query_package(module, pkgng_path, package, dir_arg):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
-
+
remove_c += 1
if remove_c > 0:
@@ -141,7 +162,7 @@ def remove_packages(module, pkgng_path, packages, rootdir_arg):
return (False, "package(s) already absent")
-def install_packages(module, pkgng_path, packages, cached, pkgsite, rootdir_arg):
+def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg):
install_c = 0
@@ -161,44 +182,44 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite, rootdir_arg)
if old_pkgng:
rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
else:
- rc, out, err = module.run_command("%s update" % (pkgng_path))
+ rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg))
if rc != 0:
module.fail_json(msg="Could not update catalogue")
for package in packages:
- if query_package(module, pkgng_path, package, rootdir_arg):
+ if query_package(module, pkgng_path, package, dir_arg):
continue
if not module.check_mode:
if old_pkgng:
rc, out, err = module.run_command("%s %s %s install -g -U -y %s" % (batch_var, pkgsite, pkgng_path, package))
else:
- rc, out, err = module.run_command("%s %s %s install %s -g -U -y %s" % (batch_var, pkgng_path, rootdir_arg, pkgsite, package))
+ rc, out, err = module.run_command("%s %s %s install %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, pkgsite, package))
- if not module.check_mode and not query_package(module, pkgng_path, package, rootdir_arg):
+ if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err)
install_c += 1
-
+
if install_c > 0:
return (True, "added %s package(s)" % (install_c))
return (False, "package(s) already present")
-def annotation_query(module, pkgng_path, package, tag, rootdir_arg):
- rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, rootdir_arg, package))
+def annotation_query(module, pkgng_path, package, tag, dir_arg):
+ rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
if match:
return match.group('value')
return False
-def annotation_add(module, pkgng_path, package, tag, value, rootdir_arg):
- _value = annotation_query(module, pkgng_path, package, tag, rootdir_arg)
+def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
if not _value:
# Annotation does not exist, add it.
rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
- % (pkgng_path, rootdir_arg, package, tag, value))
+ % (pkgng_path, dir_arg, package, tag, value))
if rc != 0:
module.fail_json("could not annotate %s: %s"
% (package, out), stderr=err)
@@ -213,19 +234,19 @@ def annotation_add(module, pkgng_path, package, tag, value, rootdir_arg):
# Annotation exists, nothing to do
return False
-def annotation_delete(module, pkgng_path, package, tag, value, rootdir_arg):
- _value = annotation_query(module, pkgng_path, package, tag, rootdir_arg)
+def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
if _value:
rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
- % (pkgng_path, rootdir_arg, package, tag))
+ % (pkgng_path, dir_arg, package, tag))
if rc != 0:
module.fail_json("could not delete annotation to %s: %s"
% (package, out), stderr=err)
return True
return False
-def annotation_modify(module, pkgng_path, package, tag, value, rootdir_arg):
- _value = annotation_query(module, pkgng_path, package, tag, rootdir_arg)
+def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
if not value:
# No such tag
module.fail_json("could not change annotation to %s: tag %s does not exist"
@@ -235,14 +256,14 @@ def annotation_modify(module, pkgng_path, package, tag, value, rootdir_arg):
return False
else:
rc,out,err = module.run_command('%s %s annotate -y -M %s %s "%s"'
- % (pkgng_path, rootdir_arg, package, tag, value))
+ % (pkgng_path, dir_arg, package, tag, value))
if rc != 0:
module.fail_json("could not change annotation annotation to %s: %s"
% (package, out), stderr=err)
return True
-def annotate_packages(module, pkgng_path, packages, annotation, rootdir_arg):
+def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
annotate_c = 0
annotations = map(lambda _annotation:
re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?',
@@ -264,54 +285,79 @@ def annotate_packages(module, pkgng_path, packages, annotation, rootdir_arg):
return (True, "added %s annotations." % annotate_c)
return (False, "changed no annotations")
+def autoremove_packages(module, pkgng_path, dir_arg):
+ rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
+
+ autoremove_c = 0
+
+ match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
+ if match:
+ autoremove_c = int(match.group(1))
+
+ if autoremove_c == 0:
+ return False, "no package(s) to autoremove"
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg))
+
+ return True, "autoremoved %d package(s)" % (autoremove_c)
+
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default="present", choices=["present","absent"], required=False),
- name = dict(aliases=["pkg"], required=True),
+ name = dict(aliases=["pkg"], required=True, type='list'),
cached = dict(default=False, type='bool'),
annotation = dict(default="", required=False),
pkgsite = dict(default="", required=False),
- rootdir = dict(default="", required=False)),
- supports_check_mode = True)
+ rootdir = dict(default="", required=False, type='path'),
+ chroot = dict(default="", required=False, type='path'),
+ autoremove = dict(default=False, type='bool')),
+ supports_check_mode = True,
+ mutually_exclusive =[["rootdir", "chroot"]])
pkgng_path = module.get_bin_path('pkg', True)
p = module.params
- pkgs = p["name"].split(",")
+ pkgs = p["name"]
changed = False
msgs = []
- rootdir_arg = ""
+ dir_arg = ""
if p["rootdir"] != "":
old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
if old_pkgng:
module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
else:
- rootdir_arg = "--rootdir %s" % (p["rootdir"])
+ dir_arg = "--rootdir %s" % (p["rootdir"])
+
+ if p["chroot"] != "":
+ dir_arg = '--chroot %s' % (p["chroot"])
if p["state"] == "present":
- _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], rootdir_arg)
+ _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg)
changed = changed or _changed
msgs.append(_msg)
elif p["state"] == "absent":
- _changed, _msg = remove_packages(module, pkgng_path, pkgs, rootdir_arg)
+ _changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ if p["autoremove"]:
+ _changed, _msg = autoremove_packages(module, pkgng_path, dir_arg)
changed = changed or _changed
msgs.append(_msg)
if p["annotation"]:
- _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], rootdir_arg)
+ _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
changed = changed or _changed
msgs.append(_msg)
module.exit_json(changed=changed, msg=", ".join(msgs))
-
-# import module snippets
-from ansible.module_utils.basic import *
-
-main()
+if __name__ == '__main__':
+ main()
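
A standalone sketch of how autoremove_packages() derives its count from the
dry run; the sample banner is made up but follows the wording quoted in the
regex above:

    import re

    out = ("Deinstallation has been requested for the following 2 "
           "packages:\n\tfoo-1.0\n\tbar-2.0\n")
    match = re.search(
        r'^Deinstallation has been requested for the following '
        r'([0-9]+) packages', out, re.MULTILINE)
    count = int(match.group(1)) if match else 0
    assert count == 2
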
diff --git a/packaging/os/pkgutil.py b/packaging/os/pkgutil.py
index 3a4720630cf..a54e96eeb08 100644
--- a/packaging/os/pkgutil.py
+++ b/packaging/os/pkgutil.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
# (c) 2013, Alexander Winkler
-# based on svr4pkg by
-# Boyd Adamson (2012)
+# based on svr4pkg by
+# Boyd Adamson (2012)
#
# This file is part of Ansible
#
@@ -21,6 +21,10 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: pkgutil
@@ -42,6 +46,7 @@
description:
- Specifies the repository path to install the package from.
- Its global definition is done in C(/etc/opt/csw/pkgutil.conf).
+ required: false
state:
description:
- Whether to install (C(present)), or remove (C(absent)) a package.
@@ -49,24 +54,35 @@
- "Note: The module has a limitation that (C(latest)) only works for one package, not lists of them."
required: true
choices: ["present", "absent", "latest"]
+ update_catalog:
+ description:
+ - If you want to refresh your catalog from the mirror, set this to C(yes).
+ required: false
+ default: False
+ version_added: "2.1"
'''
EXAMPLES = '''
# Install a package
-pkgutil: name=CSWcommon state=present
+- pkgutil:
+ name: CSWcommon
+ state: present
# Install a package from a specific repository
-pkgutil: name=CSWnrpe site='ftp://myinternal.repo/opencsw/kiel state=latest'
+- pkgutil:
+ name: CSWnrpe
+ site: 'ftp://myinternal.repo/opencsw/kiel'
+ state: latest
'''
import os
import pipes
def package_installed(module, name):
- cmd = [module.get_bin_path('pkginfo', True)]
+ cmd = ['pkginfo']
cmd.append('-q')
cmd.append(name)
- rc, out, err = module.run_command(' '.join(cmd))
+ rc, out, err = run_command(module, cmd)
if rc == 0:
return True
else:
@@ -74,24 +90,25 @@ def package_installed(module, name):
def package_latest(module, name, site):
# Only supports one package
- cmd = [ 'pkgutil', '--single', '-c' ]
+ cmd = [ 'pkgutil', '-U', '--single', '-c' ]
if site is not None:
- cmd += [ '-t', pipes.quote(site) ]
- cmd.append(pipes.quote(name))
- cmd += [ '| tail -1 | grep -v SAME' ]
- rc, out, err = module.run_command(' '.join(cmd), use_unsafe_shell=True)
- if rc == 1:
- return True
- else:
- return False
+ cmd += [ '-t', site]
+ cmd.append(name)
+ rc, out, err = run_command(module, cmd)
+ # This replaces the old "| tail -1 | grep -v SAME" pipeline.
+ # Use index -2, because splitting on \n creates an empty element
+ # at the end of the list.
+ return 'SAME' in out.split('\n')[-2]
-def run_command(module, cmd):
+def run_command(module, cmd, **kwargs):
progname = cmd[0]
- cmd[0] = module.get_bin_path(progname, True)
- return module.run_command(cmd)
+ cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin'])
+ return module.run_command(cmd, **kwargs)
-def package_install(module, state, name, site):
+def package_install(module, state, name, site, update_catalog):
cmd = [ 'pkgutil', '-iy' ]
+ if update_catalog:
+ cmd += [ '-U' ]
if site is not None:
cmd += [ '-t', site ]
if state == 'latest':
@@ -100,8 +117,10 @@ def package_install(module, state, name, site):
(rc, out, err) = run_command(module, cmd)
return (rc, out, err)
-def package_upgrade(module, name, site):
+def package_upgrade(module, name, site, update_catalog):
cmd = [ 'pkgutil', '-ufy' ]
+ if update_catalog:
+ cmd += [ '-U' ]
if site is not None:
cmd += [ '-t', site ]
cmd.append(name)
@@ -119,12 +138,14 @@ def main():
name = dict(required = True),
state = dict(required = True, choices=['present', 'absent','latest']),
site = dict(default = None),
+ update_catalog = dict(required = False, default = False, type='bool'),
),
supports_check_mode=True
)
name = module.params['name']
state = module.params['state']
site = module.params['site']
+ update_catalog = module.params['update_catalog']
rc = None
out = ''
err = ''
@@ -136,31 +157,59 @@ def main():
if not package_installed(module, name):
if module.check_mode:
module.exit_json(changed=True)
- (rc, out, err) = package_install(module, state, name, site)
+ (rc, out, err) = package_install(module, state, name, site, update_catalog)
# Stdout is normally empty but for some packages can be
# very long and is not often useful
if len(out) > 75:
out = out[:75] + '...'
+ if rc != 0:
+ if err:
+ msg = err
+ else:
+ msg = out
+ module.fail_json(msg=msg)
elif state == 'latest':
if not package_installed(module, name):
if module.check_mode:
module.exit_json(changed=True)
- (rc, out, err) = package_install(module, state, name, site)
+ (rc, out, err) = package_install(module, state, name, site, update_catalog)
+ if len(out) > 75:
+ out = out[:75] + '...'
+ if rc != 0:
+ if err:
+ msg = err
+ else:
+ msg = out
+ module.fail_json(msg=msg)
+
else:
if not package_latest(module, name, site):
if module.check_mode:
module.exit_json(changed=True)
- (rc, out, err) = package_upgrade(module, name, site)
+ (rc, out, err) = package_upgrade(module, name, site, update_catalog)
if len(out) > 75:
out = out[:75] + '...'
+ if rc != 0:
+ if err:
+ msg = err
+ else:
+ msg = out
+ module.fail_json(msg=msg)
elif state == 'absent':
if package_installed(module, name):
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = package_uninstall(module, name)
- out = out[:75]
+ if len(out) > 75:
+ out = out[:75] + '...'
+ if rc != 0:
+ if err:
+ msg = err
+ else:
+ msg = out
+ module.fail_json(msg=msg)
if rc is None:
# pkgutil was not executed because the package was already present/absent
@@ -180,4 +229,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
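# Illustration: why package_latest() above reads out.split('\n')[-2]. The
# captured command output ends with a trailing newline, so splitting on '\n'
# leaves an empty string as the last element; the last real line sits at
# index -2. The sample output below is made up for the demo.

out = "CSWnrpe 2.15,REV=2014.06.29 2.15,REV=2014.06.29 SAME\n"
lines = out.split('\n')
print(lines)                 # [..., line containing 'SAME', ''] - empty string last
print('SAME' in lines[-2])   # True -> installed version matches the catalog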
diff --git a/packaging/os/portage.py b/packaging/os/portage.py
index 7be55db3ca8..5debeda058c 100644
--- a/packaging/os/portage.py
+++ b/packaging/os/portage.py
@@ -1,8 +1,10 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+# (c) 2016, William L Thomson Jr
# (c) 2013, Yap Sok Ann
# Written by Yap Sok Ann
+# Modified by William L. Thomson Jr.
# Based on apt module written by Matthew Williams
#
# This module is free software: you can redistribute it and/or modify
@@ -19,6 +21,10 @@
# along with this software. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: portage
@@ -76,29 +82,29 @@
description:
- Do not add the packages to the world file (--oneshot)
required: false
- default: null
- choices: [ "yes" ]
+ default: False
+ choices: [ "yes", "no" ]
noreplace:
description:
- Do not re-emerge installed packages (--noreplace)
required: false
- default: null
- choices: [ "yes" ]
+ default: False
+ choices: [ "yes", "no" ]
nodeps:
description:
- Only merge packages but not their dependencies (--nodeps)
required: false
- default: null
- choices: [ "yes" ]
+ default: False
+ choices: [ "yes", "no" ]
onlydeps:
description:
- Only merge packages' dependencies but not the packages (--onlydeps)
required: false
- default: null
- choices: [ "yes" ]
+ default: False
+ choices: [ "yes", "no" ]
depclean:
description:
@@ -106,22 +112,22 @@
- If no package is specified, clean up the world's dependencies
- Otherwise, --depclean serves as a dependency aware version of --unmerge
required: false
- default: null
- choices: [ "yes" ]
+ default: False
+ choices: [ "yes", "no" ]
quiet:
description:
- Run emerge in quiet mode (--quiet)
required: false
- default: null
- choices: [ "yes" ]
+ default: False
+ choices: [ "yes", "no" ]
verbose:
description:
- Run emerge in verbose mode (--verbose)
required: false
- default: null
- choices: [ "yes" ]
+ default: False
+ choices: [ "yes", "no" ]
sync:
description:
@@ -130,24 +136,50 @@
- If web, perform "emerge-webrsync"
required: false
default: null
- choices: [ "yes", "web" ]
+ choices: [ "yes", "web", "no" ]
getbinpkg:
description:
- Prefer packages specified at PORTAGE_BINHOST in make.conf
required: false
- default: null
- choices: [ "yes" ]
+ default: False
+ choices: [ "yes", "no" ]
usepkgonly:
description:
- Merge only binaries (no compiling). This sets getbinpkg=yes.
required: false
- deafult: null
- choices: [ "yes" ]
+ default: False
+ choices: [ "yes", "no" ]
+
+ keepgoing:
+ description:
+ - Continue as much as possible after an error.
+ required: false
+ default: False
+ choices: [ "yes", "no" ]
+ version_added: 2.3
+
+ jobs:
+ description:
+ - Specifies the number of packages to build simultaneously.
+ required: false
+ default: None
+ type: int
+ version_added: 2.3
+
+ loadavg:
+ description:
+ - Specifies that no new builds should be started if there are other
+ builds running and the load average is at least LOAD.
+ required: false
+ default: None
+ type: float
+ version_added: 2.3
requirements: [ gentoolkit ]
-author:
+author:
+ - "William L Thomson Jr (@wltjr)"
- "Yap Sok Ann (@sayap)"
- "Andrew Udvare"
notes: []
@@ -155,28 +187,46 @@
EXAMPLES = '''
# Make sure package foo is installed
-- portage: package=foo state=present
+- portage:
+ package: foo
+ state: present
# Make sure package foo is not installed
-- portage: package=foo state=absent
+- portage:
+ package: foo
+ state: absent
# Update package foo to the "best" version
-- portage: package=foo update=yes
+- portage:
+ package: foo
+ update: yes
# Install package foo using PORTAGE_BINHOST setup
-- portage: package=foo getbinpkg=yes
+- portage:
+ package: foo
+ getbinpkg: yes
# Re-install world from binary packages only and do not allow any compiling
-- portage: package=@world usepkgonly=yes
+- portage:
+ package: '@world'
+ usepkgonly: yes
# Sync repositories and update world
-- portage: package=@world update=yes deep=yes sync=yes
+- portage:
+ package: '@world'
+ update: yes
+ deep: yes
+ sync: yes
# Remove unneeded packages
-- portage: depclean=yes
+- portage:
+ depclean: yes
# Remove package foo if it is not explicitly needed
-- portage: package=foo state=absent depclean=yes
+- portage:
+ package: foo
+ state: absent
+ depclean: yes
'''
@@ -272,14 +322,24 @@ def emerge_packages(module, packages):
'getbinpkg': '--getbinpkg',
'usepkgonly': '--usepkgonly',
'usepkg': '--usepkg',
+ 'keepgoing': '--keep-going',
}
- for flag, arg in emerge_flags.iteritems():
+ for flag, arg in emerge_flags.items():
if p[flag]:
args.append(arg)
if p['usepkg'] and p['usepkgonly']:
module.fail_json(msg='Use only one of usepkg, usepkgonly')
+ emerge_flags = {
+ 'jobs': '--jobs=',
+ 'loadavg': '--load-average ',
+ }
+
+ for flag, arg in emerge_flags.items():
+ if p[flag] is not None:
+ args.append(arg + str(p[flag]))
+
cmd, (rc, out, err) = run_emerge(module, packages, *args)
if rc != 0:
module.fail_json(
@@ -396,26 +456,29 @@ def run_emerge(module, packages, *args):
def main():
module = AnsibleModule(
argument_spec=dict(
- package=dict(default=None, aliases=['name']),
+ package=dict(default=None, aliases=['name'], type='list'),
state=dict(
default=portage_present_states[0],
choices=portage_present_states + portage_absent_states,
),
- update=dict(default=None, choices=['yes']),
- deep=dict(default=None, choices=['yes']),
- newuse=dict(default=None, choices=['yes']),
- changed_use=dict(default=None, choices=['yes']),
- oneshot=dict(default=None, choices=['yes']),
- noreplace=dict(default=None, choices=['yes']),
- nodeps=dict(default=None, choices=['yes']),
- onlydeps=dict(default=None, choices=['yes']),
- depclean=dict(default=None, choices=['yes']),
- quiet=dict(default=None, choices=['yes']),
- verbose=dict(default=None, choices=['yes']),
+ update=dict(default=False, type='bool'),
+ deep=dict(default=False, type='bool'),
+ newuse=dict(default=False, type='bool'),
+ changed_use=dict(default=False, type='bool'),
+ oneshot=dict(default=False, type='bool'),
+ noreplace=dict(default=False, type='bool'),
+ nodeps=dict(default=False, type='bool'),
+ onlydeps=dict(default=False, type='bool'),
+ depclean=dict(default=False, type='bool'),
+ quiet=dict(default=False, type='bool'),
+ verbose=dict(default=False, type='bool'),
sync=dict(default=None, choices=['yes', 'web']),
- getbinpkg=dict(default=None, choices=['yes']),
- usepkgonly=dict(default=None, choices=['yes']),
- usepkg=dict(default=None, choices=['yes']),
+ getbinpkg=dict(default=False, type='bool'),
+ usepkgonly=dict(default=False, type='bool'),
+ usepkg=dict(default=False, type='bool'),
+ keepgoing=dict(default=False, type='bool'),
+ jobs=dict(default=None, type='int'),
+ loadavg=dict(default=None, type='float'),
),
required_one_of=[['package', 'sync', 'depclean']],
mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']],
@@ -434,7 +497,7 @@ def main():
packages = []
if p['package']:
- packages.extend(p['package'].split(','))
+ packages.extend(p['package'])
if p['depclean']:
if packages and p['state'] not in portage_absent_states:
@@ -454,4 +517,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
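# Illustration: the two-pass flag mapping from emerge_packages() above as a
# standalone sketch. Boolean parameters map to bare switches; valued
# parameters (jobs, loadavg) get their value appended to the option string.
# The parameter values are made up for the demo.

p = {'deep': True, 'verbose': False, 'keepgoing': True,
     'jobs': 4, 'loadavg': None}

args = []
for flag, arg in {'deep': '--deep', 'verbose': '--verbose',
                  'keepgoing': '--keep-going'}.items():
    if p[flag]:
        args.append(arg)

for flag, arg in {'jobs': '--jobs=', 'loadavg': '--load-average '}.items():
    if p[flag] is not None:
        args.append(arg + str(p[flag]))

print(args)  # ['--deep', '--keep-going', '--jobs=4']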
diff --git a/packaging/os/portinstall.py b/packaging/os/portinstall.py
index b4e3044167e..ccd301e526a 100644
--- a/packaging/os/portinstall.py
+++ b/packaging/os/portinstall.py
@@ -19,6 +19,10 @@
# along with this software. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: portinstall
@@ -48,17 +52,22 @@
EXAMPLES = '''
# Install package foo
-- portinstall: name=foo state=present
+- portinstall:
+ name: foo
+ state: present
# Install package security/cyrus-sasl2-saslauthd
-- portinstall: name=security/cyrus-sasl2-saslauthd state=present
+- portinstall:
+ name: security/cyrus-sasl2-saslauthd
+ state: present
# Remove packages foo and bar
-- portinstall: name=foo,bar state=absent
+- portinstall:
+ name: foo,bar
+ state: absent
'''
-import json
import shlex
import os
import sys
@@ -204,4 +213,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/packaging/os/slackpkg.py b/packaging/os/slackpkg.py
index 674de538efe..3c4ee4f62e2 100644
--- a/packaging/os/slackpkg.py
+++ b/packaging/os/slackpkg.py
@@ -22,6 +22,10 @@
# along with this software. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: slackpkg
@@ -56,14 +60,19 @@
EXAMPLES = '''
# Install package foo
-- slackpkg: name=foo state=present
+- slackpkg:
+ name: foo
+ state: present
# Remove packages foo and bar
-- slackpkg: name=foo,bar state=absent
+- slackpkg:
+ name: foo,bar
+ state: absent
# Make sure that it is the most updated package
-- slackpkg: name=foo state=latest
-
+- slackpkg:
+ name: foo
+ state: latest
'''
@@ -196,4 +205,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/packaging/os/svr4pkg.py b/packaging/os/svr4pkg.py
index 5d8bac17eaa..81409e3b2dd 100644
--- a/packaging/os/svr4pkg.py
+++ b/packaging/os/svr4pkg.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: svr4pkg
@@ -75,19 +79,35 @@
EXAMPLES = '''
# Install a package from an already copied file
-- svr4pkg: name=CSWcommon src=/tmp/cswpkgs.pkg state=present
+- svr4pkg:
+ name: CSWcommon
+ src: /tmp/cswpkgs.pkg
+ state: present
# Install a package directly from an http site
-- svr4pkg: name=CSWpkgutil src=http://get.opencsw.org/now state=present zone=current
+- svr4pkg:
+ name: CSWpkgutil
+ src: 'http://get.opencsw.org/now'
+ state: present
+ zone: current
# Install a package with a response file
-- svr4pkg: name=CSWggrep src=/tmp/third-party.pkg response_file=/tmp/ggrep.response state=present
+- svr4pkg:
+ name: CSWggrep
+ src: /tmp/third-party.pkg
+ response_file: /tmp/ggrep.response
+ state: present
# Ensure that a package is not installed.
-- svr4pkg: name=SUNWgnome-sound-recorder state=absent
+- svr4pkg:
+ name: SUNWgnome-sound-recorder
+ state: absent
# Ensure that a category is not installed.
-- svr4pkg: name=FIREFOX state=absent category=true
+- svr4pkg:
+ name: FIREFOX
+ state: absent
+ category: true
'''
@@ -225,9 +245,10 @@ def main():
else:
result['changed'] = False
+ # rc will be none when the package already was installed and no action took place
# Only return failed=False when the returncode is known to be good as there may be more
# undocumented failure return codes
- if rc not in (0, 2, 10, 20):
+ if rc not in (None, 0, 2, 10, 20):
result['failed'] = True
else:
result['failed'] = False
@@ -241,4 +262,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
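# Illustration: why None joins the accepted return codes above. When the
# package is already in the requested state, pkgadd/pkgrm is never run and rc
# keeps its initial value of None; without the change every no-op run would
# report failed=True. Standalone sketch:

rc = None  # no command was executed

result = {}
if rc not in (None, 0, 2, 10, 20):
    result['failed'] = True
else:
    result['failed'] = False

print(result)  # {'failed': False}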
diff --git a/packaging/os/swdepot.py b/packaging/os/swdepot.py
index b14af742057..6ea7d1059be 100644
--- a/packaging/os/swdepot.py
+++ b/packaging/os/swdepot.py
@@ -21,6 +21,10 @@
import re
import pipes
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: swdepot
@@ -58,9 +62,19 @@
'''
EXAMPLES = '''
-- swdepot: name=unzip-6.0 state=installed depot=repository:/path
-- swdepot: name=unzip state=latest depot=repository:/path
-- swdepot: name=unzip state=absent
+- swdepot:
+ name: unzip-6.0
+ state: installed
+ depot: 'repository:/path'
+
+- swdepot:
+ name: unzip
+ state: latest
+ depot: 'repository:/path'
+
+- swdepot:
+ name: unzip
+ state: absent
'''
def compare_package(version1, version2):
@@ -192,5 +206,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
-
+if __name__ == '__main__':
+ main()
diff --git a/packaging/os/urpmi.py b/packaging/os/urpmi.py
index 7b7aaefbd1d..e995f1d4894 100644
--- a/packaging/os/urpmi.py
+++ b/packaging/os/urpmi.py
@@ -19,6 +19,10 @@
# along with this software. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: urpmi
@@ -44,9 +48,9 @@
required: false
default: no
choices: [ "yes", "no" ]
- no-suggests:
+ no-recommends:
description:
- - Corresponds to the C(--no-suggests) option for I(urpmi).
+ - Corresponds to the C(--no-recommends) option for I(urpmi).
required: false
default: yes
choices: [ "yes", "no" ]
@@ -63,17 +67,28 @@
EXAMPLES = '''
# install package foo
-- urpmi: pkg=foo state=present
+- urpmi:
+ pkg: foo
+ state: present
+
# remove package foo
-- urpmi: pkg=foo state=absent
+- urpmi:
+ pkg: foo
+ state: absent
+
# description: remove packages foo and bar
-- urpmi: pkg=foo,bar state=absent
+- urpmi:
+ pkg: foo,bar
+ state: absent
+
# description: update the package database (urpmi.update -a -q) and install bar (bar will be updated if a newer version exists)
-- urpmi: name=bar, state=present, update_cache=yes
+- urpmi:
+ name: bar
+ state: present
+ update_cache: yes
'''
-import json
import shlex
import os
import sys
@@ -130,7 +145,7 @@ def remove_packages(module, packages):
module.exit_json(changed=False, msg="package(s) already absent")
-def install_packages(module, pkgspec, force=True, no_suggests=True):
+def install_packages(module, pkgspec, force=True, no_recommends=True):
packages = ""
for package in pkgspec:
@@ -138,17 +153,17 @@ def install_packages(module, pkgspec, force=True, no_suggests=True):
packages += "'%s' " % package
if len(packages) != 0:
- if no_suggests:
- no_suggests_yes = '--no-suggests'
+ if no_recommends:
+ no_recommends_yes = '--no-recommends'
else:
- no_suggests_yes = ''
+ no_recommends_yes = ''
if force:
force_yes = '--force'
else:
force_yes = ''
- cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes, no_suggests_yes, packages))
+ cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes, no_recommends_yes, packages))
rc, out, err = module.run_command(cmd)
@@ -168,12 +183,12 @@ def install_packages(module, pkgspec, force=True, no_suggests=True):
def main():
module = AnsibleModule(
- argument_spec = dict(
- state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
- update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
- force = dict(default=True, type='bool'),
- no_suggests = dict(default=True, aliases=['no-suggests'], type='bool'),
- package = dict(aliases=['pkg', 'name'], required=True)))
+ argument_spec = dict(
+ state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
+ update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
+ force = dict(default=True, type='bool'),
+ no_recommends = dict(default=True, aliases=['no-recommends'], type='bool'),
+ package = dict(aliases=['pkg', 'name'], required=True)))
if not os.path.exists(URPMI_PATH):
@@ -182,7 +197,7 @@ def main():
p = module.params
force_yes = p['force']
- no_suggest_yes = p['no_suggests']
+ no_recommends_yes = p['no_recommends']
if p['update_cache']:
update_package_db(module)
@@ -190,7 +205,7 @@ def main():
packages = p['package'].split(',')
if p['state'] in [ 'installed', 'present' ]:
- install_packages(module, packages, force_yes, no_suggest_yes)
+ install_packages(module, packages, force_yes, no_recommends_yes)
elif p['state'] in [ 'removed', 'absent' ]:
remove_packages(module, packages)
@@ -198,4 +213,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
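# Illustration: how install_packages() above composes the urpmi command line
# after the no-suggests -> no-recommends rename. URPMI_PATH and the package
# list below are made-up values for the demo.

URPMI_PATH = '/usr/sbin/urpmi'
force, no_recommends = True, True

packages = ""
for package in ['foo', 'bar']:
    packages += "'%s' " % package

no_recommends_yes = '--no-recommends' if no_recommends else ''
force_yes = '--force' if force else ''

cmd = ("%s --auto %s --quiet %s %s"
       % (URPMI_PATH, force_yes, no_recommends_yes, packages))
print(cmd)
# /usr/sbin/urpmi --auto --force --quiet --no-recommends 'foo' 'bar'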
diff --git a/packaging/os/xbps.py b/packaging/os/xbps.py
new file mode 100644
index 00000000000..0bfe678ab89
--- /dev/null
+++ b/packaging/os/xbps.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2016 Dino Occhialini
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: xbps
+short_description: Manage packages with XBPS
+description:
+ - Manage packages with the XBPS package manager.
+author:
+ - "Dino Occhialini (@dinoocch)"
+ - "Michael Aldridge (@the-maldridge)"
+version_added: "2.3"
+options:
+ name:
+ description:
+ - Name of the package to install, upgrade, or remove.
+ required: false
+ default: null
+ state:
+ description:
+ - Desired state of the package.
+ required: false
+ default: "present"
+ choices: ["present", "absent", "latest"]
+ recurse:
+ description:
+ - When removing a package, also remove its dependencies, provided
+ that they are not required by other packages and were not
+ explicitly installed by a user.
+ required: false
+ default: no
+ choices: ["yes", "no"]
+ update_cache:
+ description:
+ - Whether or not to refresh the master package lists. This can be
+ run as part of a package installation or as a separate step.
+ required: false
+ default: yes
+ choices: ["yes", "no"]
+ upgrade:
+ description:
+ - Whether or not to upgrade the whole system.
+ required: false
+ default: no
+ choices: ["yes", "no"]
+'''
+
+EXAMPLES = '''
+# Install package foo
+- xbps:
+ name: foo
+ state: present
+# Upgrade package foo
+- xbps:
+ name: foo
+ state: latest
+ update_cache: yes
+# Remove packages foo and bar
+- xbps:
+ name: foo,bar
+ state: absent
+# Recursively remove package foo
+- xbps:
+ name: foo
+ state: absent
+ recurse: yes
+# Update package cache
+- xbps:
+ update_cache: yes
+# Upgrade packages
+- xbps:
+ upgrade: yes
+'''
+
+RETURN = '''
+msg:
+ description: Message about results
+ returned: success
+ type: string
+ sample: "System Upgraded"
+packages:
+ description: Packages that are affected/would be affected
+ type: list
+ sample: ["ansible"]
+'''
+
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def is_installed(xbps_output):
+ """Returns package install state"""
+ return bool(len(xbps_output))
+
+
+def query_package(module, xbps_path, name, state="present"):
+ """Returns Package info"""
+ if state == "present":
+ lcmd = "%s %s" % (xbps_path['query'], name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if not is_installed(lstdout):
+ # package is not installed locally
+ return False, False
+
+ rcmd = "%s -Sun" % (xbps_path['install'])
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+ if rrc == 0 or rrc == 17:
+ """Return True to indicate that the package is installed locally,
+ and the result of the version number comparison to determine if the
+ package is up-to-date"""
+ return True, name not in rstdout
+
+ return False, False
+
+
+def update_package_db(module, xbps_path):
+ """Returns True if update_package_db changed"""
+ cmd = "%s -S" % (xbps_path['install'])
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="Could not update package db")
+ if "avg rate" in stdout:
+ return True
+ else:
+ return False
+
+
+def upgrade(module, xbps_path):
+ """Returns true is full upgrade succeeds"""
+ cmdupgrade = "%s -uy" % (xbps_path['install'])
+ cmdneedupgrade = "%s -un" % (xbps_path['install'])
+
+ rc, stdout, stderr = module.run_command(cmdneedupgrade, check_rc=False)
+ if rc == 0:
+ if len(stdout.splitlines()) == 0:
+ module.exit_json(changed=False, msg='Nothing to upgrade')
+ else:
+ rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
+ if rc == 0:
+ module.exit_json(changed=True, msg='System upgraded')
+ else:
+ module.fail_json(msg="Could not upgrade")
+ else:
+ module.fail_json(msg="Could not upgrade")
+
+
+def remove_packages(module, xbps_path, packages):
+ """Returns true if package removal succeeds"""
+ changed_packages = []
+ # Using a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated = query_package(module, xbps_path, package)
+ if not installed:
+ continue
+
+ cmd = "%s -y %s" % (xbps_path['remove'], package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ changed_packages.append(package)
+
+ if len(changed_packages) > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" %
+ len(changed_packages), packages=changed_packages)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, xbps_path, state, packages):
+ """Returns true if package install succeeds."""
+ toInstall = []
+ for i, package in enumerate(packages):
+ """If the package is installed and state == present or state == latest
+ and is up-to-date then skip"""
+ installed, updated = query_package(module, xbps_path, package)
+ if installed and (state == 'present' or
+ (state == 'latest' and updated)):
+ continue
+
+ toInstall.append(package)
+
+ if len(toInstall) == 0:
+ module.exit_json(changed=False, msg="Nothing to Install")
+
+ cmd = "%s -y %s" % (xbps_path['install'], " ".join(toInstall))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0 and not (state == 'latest' and rc == 17):
+ module.fail_json(msg="failed to install %s" % (package))
+
+ module.exit_json(changed=True, msg="installed %s package(s)"
+ % (len(toInstall)),
+ packages=toInstall)
+
+ module.exit_json(changed=False, msg="package(s) already installed",
+ packages=[])
+
+
+def check_packages(module, xbps_path, packages, state):
+ """Returns change status of command"""
+ would_be_changed = []
+ for package in packages:
+ installed, updated = query_package(module, xbps_path, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state),
+ packages=would_be_changed)
+ else:
+ module.exit_json(changed=False, msg="package(s) already %s" % state,
+ packages=[])
+
+
+def main():
+ """Returns, calling appropriate command"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, aliases=['pkg', 'package'], type='list'),
+ state=dict(default='present', choices=['present', 'installed',
+ 'latest', 'absent',
+ 'removed']),
+ recurse=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool'),
+ upgrade=dict(default=False, type='bool'),
+ update_cache=dict(default=True, aliases=['update-cache'],
+ type='bool')
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ supports_check_mode=True)
+
+ xbps_path = dict()
+ xbps_path['install'] = module.get_bin_path('xbps-install', True)
+ xbps_path['query'] = module.get_bin_path('xbps-query', True)
+ xbps_path['remove'] = module.get_bin_path('xbps-remove', True)
+
+ if not os.path.exists(xbps_path['install']):
+ module.fail_json(msg="cannot find xbps, in path %s"
+ % (xbps_path['install']))
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p["update_cache"] and not module.check_mode:
+ changed = update_package_db(module, xbps_path)
+ if p['name'] is None and not p['upgrade']:
+ if changed:
+ module.exit_json(changed=True,
+ msg='Updated the package master lists')
+ else:
+ module.exit_json(changed=False,
+ msg='Package list already up to date')
+
+ if (p['update_cache'] and module.check_mode and not
+ (p['name'] or p['upgrade'])):
+ module.exit_json(changed=True,
+ msg='Would have updated the package cache')
+
+ if p['upgrade']:
+ upgrade(module, xbps_path)
+
+ if p['name']:
+ pkgs = p['name']
+
+ if module.check_mode:
+ check_packages(module, xbps_path, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, xbps_path, p['state'], pkgs)
+ elif p['state'] == 'absent':
+ remove_packages(module, xbps_path, pkgs)
+
+
+if __name__ == "__main__":
+ main()
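# Illustration: the two booleans query_package() above returns. The first
# says whether the package is installed at all, the second whether it is
# up-to-date, derived from whether the dry-run update (xbps-install -Sun)
# still lists it. The command outputs below are made up for the demo.

def query_package_sketch(name, query_out, dry_run_update_out):
    if not query_out:         # xbps-query printed nothing
        return False, False   # not installed, so not up-to-date either
    # installed; up-to-date iff the pending-update list does not mention it
    return True, name not in dry_run_update_out

print(query_package_sketch('vim', 'vim-9.0_1', 'nano-7.2_1 update x86_64'))
# (True, True)
print(query_package_sketch('vim', 'vim-8.2_1', 'vim-9.0_1 update x86_64'))
# (True, False)
print(query_package_sketch('vim', '', ''))
# (False, False)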
diff --git a/packaging/os/yum_repository.py b/packaging/os/yum_repository.py
new file mode 100644
index 00000000000..1d00d26f682
--- /dev/null
+++ b/packaging/os/yum_repository.py
@@ -0,0 +1,761 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2015-2016, Jiri Tyr
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.six.moves import configparser
+
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: yum_repository
+author: Jiri Tyr (@jtyr)
+version_added: '2.1'
+short_description: Add and remove YUM repositories
+description:
+ - Add or remove YUM repositories in RPM-based Linux distributions.
+
+options:
+ async:
+ required: false
+ choices: ['yes', 'no']
+ default: 'yes'
+ description:
+ - If set to C(yes) Yum will download packages and metadata from this
+ repo in parallel, if possible.
+ bandwidth:
+ required: false
+ default: 0
+ description:
+ - Maximum available network bandwidth in bytes/second. Used with the
+ I(throttle) option.
+ - If I(throttle) is a percentage and bandwidth is C(0) then bandwidth
+ throttling will be disabled. If I(throttle) is expressed as a data rate
+ (bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
+ throttling).
+ baseurl:
+ required: false
+ default: null
+ description:
+ - URL to the directory where the yum repository's 'repodata' directory
+ lives.
+ - This or the I(mirrorlist) parameter is required if I(state) is set to
+ C(present).
+ cost:
+ required: false
+ default: 1000
+ description:
+ - Relative cost of accessing this repository. Useful for weighing one
+ repo's packages as greater/less than any other.
+ deltarpm_metadata_percentage:
+ required: false
+ default: 100
+ description:
+ - When the relative size of deltarpm metadata vs pkgs is larger than
+ this, deltarpm metadata is not downloaded from the repo. Note that you
+ can give values over C(100), so C(200) means that the metadata is
+ required to be half the size of the packages. Use C(0) to turn off
+ this check, and always download metadata.
+ deltarpm_percentage:
+ required: false
+ default: 75
+ description:
+ - When the relative size of delta vs pkg is larger than this, delta is
+ not used. Use C(0) to turn off delta rpm processing. Local repositories
+ (with file:// I(baseurl)) have delta rpms turned off by default.
+ description:
+ required: false
+ default: null
+ description:
+ - A human readable string describing the repository.
+ - This parameter is only required if I(state) is set to C(present).
+ enabled:
+ required: false
+ choices: ['yes', 'no']
+ default: 'yes'
+ description:
+ - This tells yum whether or not to use this repository.
+ enablegroups:
+ required: false
+ choices: ['yes', 'no']
+ default: 'yes'
+ description:
+ - Determines whether yum will allow the use of package groups for this
+ repository.
+ exclude:
+ required: false
+ default: null
+ description:
+ - List of packages to exclude from updates or installs. This should be a
+ space separated list. Shell globs using wildcards (eg. C(*) and C(?))
+ are allowed.
+ - The list can also be a regular YAML array.
+ failovermethod:
+ required: false
+ choices: [roundrobin, priority]
+ default: roundrobin
+ description:
+ - C(roundrobin) randomly selects a URL out of the list of URLs to start
+ with and proceeds through each of them as it encounters a failure
+ contacting the host.
+ - C(priority) starts from the first I(baseurl) listed and reads through
+ them sequentially.
+ file:
+ required: false
+ default: null
+ description:
+ - File to use to save the repo in. Defaults to the value of I(name).
+ gpgcakey:
+ required: false
+ default: null
+ description:
+ - A URL pointing to the ASCII-armored CA key file for the repository.
+ gpgcheck:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - Tells yum whether or not it should perform a GPG signature check on
+ packages.
+ gpgkey:
+ required: false
+ default: null
+ description:
+ - A URL pointing to the ASCII-armored GPG key file for the repository.
+ http_caching:
+ required: false
+ choices: [all, packages, none]
+ default: all
+ description:
+ - Determines how upstream HTTP caches are instructed to handle any HTTP
+ downloads that Yum does.
+ - C(all) means that all HTTP downloads should be cached.
+ - C(packages) means that only RPM package downloads should be cached (but
+ not repository metadata downloads).
+ - C(none) means that no HTTP downloads should be cached.
+ include:
+ required: false
+ default: null
+ description:
+ - Include an external configuration file. Both local path and URL are
+ supported. Configuration file will be inserted at the position of the
+ I(include=) line. Included files may contain further include lines.
+ Yum will abort with an error if an inclusion loop is detected.
+ includepkgs:
+ required: false
+ default: null
+ description:
+ - List of packages you want to only use from a repository. This should be
+ a space separated list. Shell globs using wildcards (eg. C(*) and C(?))
+ are allowed. Substitution variables (e.g. C($releasever)) are honored
+ here.
+ - The list can also be a regular YAML array.
+ ip_resolve:
+ required: false
+ choices: [4, 6, IPv4, IPv6, whatever]
+ default: whatever
+ description:
+ - Determines how yum resolves host names.
+ - C(4) or C(IPv4) - resolve to IPv4 addresses only.
+ - C(6) or C(IPv6) - resolve to IPv6 addresses only.
+ keepalive:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - This tells yum whether or not HTTP/1.1 keepalive should be used with
+ this repository. This can improve transfer speeds by using one
+ connection when downloading multiple files from a repository.
+ keepcache:
+ required: false
+ choices: ['0', '1']
+ default: '1'
+ description:
+ - Either C(1) or C(0). Determines whether or not yum keeps the cache of
+ headers and packages after successful installation.
+ metadata_expire:
+ required: false
+ default: 21600
+ description:
+ - Time (in seconds) after which the metadata will expire.
+ - Default value is 6 hours.
+ metadata_expire_filter:
+ required: false
+ choices: [never, 'read-only:past', 'read-only:present', 'read-only:future']
+ default: 'read-only:present'
+ description:
+ - Filter the I(metadata_expire) time, allowing a trade of speed for
+ accuracy if a command doesn't require it. Each yum command can specify
+ that it requires a certain level of timeliness quality from the remote
+ repos. from "I'm about to install/upgrade, so this better be current"
+ to "Anything that's available is good enough".
+ - C(never) - Nothing is filtered, always obey I(metadata_expire).
+ - C(read-only:past) - Commands that only care about past information are
+ filtered from metadata expiring. Eg. I(yum history) info (if history
+ needs to lookup anything about a previous transaction, then by
+ definition the remote package was available in the past).
+ - C(read-only:present) - Commands that are balanced between past and
+ future. Eg. I(yum list yum).
+ - C(read-only:future) - Commands that are likely to result in running
+ other commands which will require the latest metadata. Eg.
+ I(yum check-update).
+ - Note that this option does not override "yum clean expire-cache".
+ metalink:
+ required: false
+ default: null
+ description:
+ - Specifies a URL to a metalink file for the repomd.xml, a list of
+ mirrors for the entire repository is generated by converting the
+ mirrors for the repomd.xml file to a I(baseurl).
+ mirrorlist:
+ required: false
+ default: null
+ description:
+ - Specifies a URL to a file containing a list of baseurls.
+ - This or the I(baseurl) parameter is required if I(state) is set to
+ C(present).
+ mirrorlist_expire:
+ required: false
+ default: 21600
+ description:
+ - Time (in seconds) after which the locally cached mirrorlist will
+ expire.
+ - Default value is 6 hours.
+ name:
+ required: true
+ description:
+ - Unique repository ID.
+ - This parameter is only required if I(state) is set to C(present) or
+ C(absent).
+ params:
+ required: false
+ default: null
+ description:
+ - Option used to allow the user to overwrite any of the other options.
+ To remove an option, set the value of the option to C(null).
+ password:
+ required: false
+ default: null
+ description:
+ - Password to use with the username for basic authentication.
+ priority:
+ required: false
+ default: 99
+ description:
+ - Enforce ordered protection of repositories. The value is an integer
+ from 1 to 99.
+ - This option only works if the YUM Priorities plugin is installed.
+ protect:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - Protect packages from updates from other repositories.
+ proxy:
+ required: false
+ default: null
+ description:
+ - URL to the proxy server that yum should use. Set to C(_none_) to
+ disable the global proxy setting.
+ proxy_password:
+ required: false
+ default: null
+ description:
+ - Password to use for the proxy.
+ proxy_username:
+ required: false
+ default: null
+ description:
+ - Username to use for the proxy.
+ repo_gpgcheck:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - This tells yum whether or not it should perform a GPG signature check
+ on the repodata from this repository.
+ reposdir:
+ required: false
+ default: /etc/yum.repos.d
+ description:
+ - Directory where the C(.repo) files will be stored.
+ retries:
+ required: false
+ default: 10
+ description:
+ - Set the number of times any attempt to retrieve a file should retry
+ before returning an error. Setting this to C(0) makes yum try forever.
+ s3_enabled:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - Enables support for S3 repositories.
+ - This option only works if the YUM S3 plugin is installed.
+ skip_if_unavailable:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - If set to C(yes) yum will continue running if this repository cannot be
+ contacted for any reason. This should be set carefully as all repos are
+ consulted for any given command.
+ ssl_check_cert_permissions:
+ required: false
+ choices: ['yes', 'no']
+ default: 'no'
+ description:
+ - Whether yum should check the permissions on the paths for the
+ certificates on the repository (both remote and local).
+ - If we can't read any of the files then yum will force
+ I(skip_if_unavailable) to be C(yes). This is most useful for non-root
+ processes which use yum on repos that have client cert files which are
+ readable only by root.
+ sslcacert:
+ required: false
+ default: null
+ description:
+ - Path to the directory containing the databases of the certificate
+ authorities yum should use to verify SSL certificates.
+ sslclientcert:
+ required: false
+ default: null
+ description:
+ - Path to the SSL client certificate yum should use to connect to
+ repos/remote sites.
+ sslclientkey:
+ required: false
+ default: null
+ description:
+ - Path to the SSL client key yum should use to connect to repos/remote
+ sites.
+ sslverify:
+ required: false
+ choices: ['yes', 'no']
+ default: 'yes'
+ description:
+ - Defines whether yum should verify SSL certificates/hosts at all.
+ state:
+ required: false
+ choices: [absent, present]
+ default: present
+ description:
+ - State of the repo file.
+ throttle:
+ required: false
+ default: null
+ description:
+ - Enable bandwidth throttling for downloads.
+ - This option can be expressed as an absolute data rate in bytes/sec. An
+ SI prefix (k, M or G) may be appended to the bandwidth value.
+ timeout:
+ required: false
+ default: 30
+ description:
+ - Number of seconds to wait for a connection before timing out.
+ ui_repoid_vars:
+ required: false
+ default: releasever basearch
+ description:
+ - When a repository id is displayed, append these yum variables to the
+ string if they are used in the I(baseurl)/etc. Variables are appended
+ in the order listed (and found).
+ username:
+ required: false
+ default: null
+ description:
+ - Username to use for basic authentication to a repo or really any url.
+
+extends_documentation_fragment:
+ - files
+
+notes:
+ - All comments will be removed if modifying an existing repo file.
+ - Section order is preserved in an existing repo file.
+ - Parameters in a section are ordered alphabetically in an existing repo
+ file.
+ - The repo file will be automatically deleted if it contains no repository.
+'''
+
+EXAMPLES = '''
+- name: Add repository
+ yum_repository:
+ name: epel
+ description: EPEL YUM repo
+ baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+
+- name: Add multiple repositories into the same file (1/2)
+ yum_repository:
+ name: epel
+ description: EPEL YUM repo
+ file: external_repos
+ baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+ gpgcheck: no
+
+- name: Add multiple repositories into the same file (2/2)
+ yum_repository:
+ name: rpmforge
+ description: RPMforge YUM repo
+ file: external_repos
+ baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge
+ mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge
+ enabled: no
+
+- name: Remove repository
+ yum_repository:
+ name: epel
+ state: absent
+
+- name: Remove repository from a specific repo file
+ yum_repository:
+ name: epel
+ file: external_repos
+ state: absent
+
+#
+# Allow overwriting the yum_repository parameters by defining the parameters
+# as a variable in the defaults or vars file:
+#
+# my_role_somerepo_params:
+# # Disable GPG checking
+# gpgcheck: no
+# # Remove the gpgkey option
+# gpgkey: null
+#
+- name: Add Some repo
+ yum_repository:
+ name: somerepo
+ description: Some YUM repo
+ baseurl: http://server.com/path/to/the/repo
+ gpgkey: http://server.com/keys/somerepo.pub
+ gpgcheck: yes
+ params: "{{ my_role_somerepo_params }}"
+'''
+
+RETURN = '''
+repo:
+ description: repository name
+ returned: success
+ type: string
+ sample: "epel"
+state:
+ description: state of the target, after execution
+ returned: success
+ type: string
+ sample: "present"
+'''
+
+
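# Illustration: the params override mechanism documented above. main() merges
# the user-supplied `params` dict into module.params, so any repo option can
# be overridden from one variable, and setting an option to None removes it
# from the generated file. Standalone sketch with made-up values:

module_params = {'name': 'somerepo', 'gpgcheck': True,
                 'gpgkey': 'http://server.com/keys/somerepo.pub',
                 'params': {'gpgcheck': False, 'gpgkey': None}}

if isinstance(module_params.get('params'), dict):
    module_params.update(module_params['params'])
    module_params.pop('params', None)

print(module_params)
# {'name': 'somerepo', 'gpgcheck': False, 'gpgkey': None}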
+class YumRepo(object):
+ # Class global variables
+ module = None
+ params = None
+ section = None
+ repofile = configparser.RawConfigParser()
+
+ # List of parameters which will be allowed in the repo file output
+ allowed_params = [
+ 'async',
+ 'bandwidth',
+ 'baseurl',
+ 'cost',
+ 'deltarpm_metadata_percentage',
+ 'deltarpm_percentage',
+ 'enabled',
+ 'enablegroups',
+ 'exclude',
+ 'failovermethod',
+ 'gpgcakey',
+ 'gpgcheck',
+ 'gpgkey',
+ 'http_caching',
+ 'include',
+ 'includepkgs',
+ 'ip_resolve',
+ 'keepalive',
+ 'keepcache',
+ 'metadata_expire',
+ 'metadata_expire_filter',
+ 'metalink',
+ 'mirrorlist',
+ 'mirrorlist_expire',
+ 'name',
+ 'password',
+ 'priority',
+ 'protect',
+ 'proxy',
+ 'proxy_password',
+ 'proxy_username',
+ 'repo_gpgcheck',
+ 'retries',
+ 's3_enabled',
+ 'skip_if_unavailable',
+ 'sslcacert',
+ 'ssl_check_cert_permissions',
+ 'sslclientcert',
+ 'sslclientkey',
+ 'sslverify',
+ 'throttle',
+ 'timeout',
+ 'ui_repoid_vars',
+ 'username']
+
+ # List of parameters which can be a list
+ list_params = ['exclude', 'includepkgs']
+
+ def __init__(self, module):
+ # To be able to use fail_json
+ self.module = module
+ # Shortcut for the params
+ self.params = self.module.params
+ # Section is always the repoid
+ self.section = self.params['repoid']
+
+ # Check if repo directory exists
+ repos_dir = self.params['reposdir']
+ if not os.path.isdir(repos_dir):
+ self.module.fail_json(
+ msg="Repo directory '%s' does not exist." % repos_dir)
+
+ # Set dest; also used to set dest parameter for the FS attributes
+ self.params['dest'] = os.path.join(
+ repos_dir, "%s.repo" % self.params['file'])
+
+ # Read the repo file if it exists
+ if os.path.isfile(self.params['dest']):
+ self.repofile.read(self.params['dest'])
+
+ def add(self):
+ # Remove already existing repo and create a new one
+ if self.repofile.has_section(self.section):
+ self.repofile.remove_section(self.section)
+
+ # Add section
+ self.repofile.add_section(self.section)
+
+ # Baseurl/mirrorlist is not required because for removal we need only
+ # the repo name. This is why we check if the baseurl/mirrorlist is
+ # defined.
+ if (self.params['baseurl'], self.params['mirrorlist']) == (None, None):
+ self.module.fail_json(
+ msg='Paramater "baseurl" or "mirrorlist" is required for '
+ 'adding a new repo.')
+
+ # Set options
+ for key, value in sorted(self.params.items()):
+ if key in self.list_params and isinstance(value, list):
+ # Join items into one string for specific parameters
+ value = ' '.join(value)
+ elif isinstance(value, bool):
+ # Convert boolean value to integer
+ value = int(value)
+
+ # Set the value only if it was defined (default is None)
+ if value is not None and key in self.allowed_params:
+ self.repofile.set(self.section, key, value)
+
+ def save(self):
+ if len(self.repofile.sections()):
+ # Write data into the file
+ try:
+ fd = open(self.params['dest'], 'w')
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot open repo file %s." % self.params['dest'],
+ details=str(e))
+
+ self.repofile.write(fd)
+
+ try:
+ fd.close()
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot write repo file %s." % self.params['dest'],
+ details=str(e))
+ else:
+ # Remove the file if there are no repos
+ try:
+ os.remove(self.params['dest'])
+ except OSError:
+ e = get_exception()
+ self.module.fail_json(
+ msg=(
+ "Cannot remove empty repo file %s." %
+ self.params['dest']),
+ details=str(e))
+
+ def remove(self):
+ # Remove section if exists
+ if self.repofile.has_section(self.section):
+ self.repofile.remove_section(self.section)
+
+ def dump(self):
+ repo_string = ""
+
+ # Compose the repo file
+ for section in sorted(self.repofile.sections()):
+ repo_string += "[%s]\n" % section
+
+ for key, value in sorted(self.repofile.items(section)):
+ repo_string += "%s = %s\n" % (key, value)
+
+ repo_string += "\n"
+
+ return repo_string
+
+
+def main():
+ # Module settings
+ module = AnsibleModule(
+ argument_spec=dict(
+ async=dict(type='bool'),
+ bandwidth=dict(),
+ baseurl=dict(),
+ cost=dict(),
+ deltarpm_metadata_percentage=dict(),
+ deltarpm_percentage=dict(),
+ description=dict(),
+ enabled=dict(type='bool'),
+ enablegroups=dict(type='bool'),
+ exclude=dict(),
+ failovermethod=dict(choices=['roundrobin', 'priority']),
+ file=dict(),
+ gpgcakey=dict(),
+ gpgcheck=dict(type='bool'),
+ gpgkey=dict(),
+ http_caching=dict(choices=['all', 'packages', 'none']),
+ include=dict(),
+ includepkgs=dict(),
+ ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
+ keepalive=dict(type='bool'),
+ keepcache=dict(choices=['0', '1']),
+ metadata_expire=dict(),
+ metadata_expire_filter=dict(
+ choices=[
+ 'never',
+ 'read-only:past',
+ 'read-only:present',
+ 'read-only:future']),
+ metalink=dict(),
+ mirrorlist=dict(),
+ mirrorlist_expire=dict(),
+ name=dict(required=True),
+ params=dict(type='dict'),
+ password=dict(no_log=True),
+ priority=dict(),
+ protect=dict(type='bool'),
+ proxy=dict(),
+ proxy_password=dict(no_log=True),
+ proxy_username=dict(),
+ repo_gpgcheck=dict(type='bool'),
+ reposdir=dict(default='/etc/yum.repos.d', type='path'),
+ retries=dict(),
+ s3_enabled=dict(type='bool'),
+ skip_if_unavailable=dict(type='bool'),
+ sslcacert=dict(),
+ ssl_check_cert_permissions=dict(type='bool'),
+ sslclientcert=dict(),
+ sslclientkey=dict(),
+ sslverify=dict(type='bool'),
+ state=dict(choices=['present', 'absent'], default='present'),
+ throttle=dict(),
+ timeout=dict(),
+ ui_repoid_vars=dict(),
+ username=dict(),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # Update module parameters by user's parameters if defined
+ if 'params' in module.params and isinstance(module.params['params'], dict):
+ module.params.update(module.params['params'])
+ # Remove the params
+ module.params.pop('params', None)
+
+ name = module.params['name']
+ state = module.params['state']
+
+ # Check if required parameters are present
+ if state == 'present':
+ if (
+ module.params['baseurl'] is None and
+ module.params['mirrorlist'] is None):
+ module.fail_json(
+ msg="Parameter 'baseurl' or 'mirrorlist' is required.")
+ if module.params['description'] is None:
+ module.fail_json(
+ msg="Parameter 'description' is required.")
+
+ # Rename "name" and "description" to ensure correct key sorting
+ module.params['repoid'] = module.params['name']
+ module.params['name'] = module.params['description']
+ del module.params['description']
+
+ # Define repo file name if it doesn't exist
+ if module.params['file'] is None:
+ module.params['file'] = module.params['repoid']
+
+ # Instantiate the YumRepo object
+ yumrepo = YumRepo(module)
+
+ # Get repo status before change
+ diff = {
+ 'before_header': yumrepo.params['dest'],
+ 'before': yumrepo.dump(),
+ 'after_header': yumrepo.params['dest'],
+ 'after': ''
+ }
+
+ # Perform action depending on the state
+ if state == 'present':
+ yumrepo.add()
+ elif state == 'absent':
+ yumrepo.remove()
+
+ # Get repo status after change
+ diff['after'] = yumrepo.dump()
+
+ # Compare repo states
+ changed = diff['before'] != diff['after']
+
+ # Save the file only if not in check mode and if there was a change
+ if not module.check_mode and changed:
+ yumrepo.save()
+
+ # Change file attributes if needed
+ if os.path.isfile(module.params['dest']):
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ # Print status of the change
+ module.exit_json(changed=changed, repo=name, state=state, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
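# Illustration: the change detection used in main() above - the rendered repo
# file is dumped before and after the add/remove and the two strings are
# compared. A standalone sketch of the same RawConfigParser round trip:

try:
    import configparser                    # Python 3
except ImportError:
    import ConfigParser as configparser    # Python 2

def dump(cp):
    out = ""
    for section in sorted(cp.sections()):
        out += "[%s]\n" % section
        for key, value in sorted(cp.items(section)):
            out += "%s = %s\n" % (key, value)
        out += "\n"
    return out

repofile = configparser.RawConfigParser()
repofile.add_section('epel')
repofile.set('epel', 'baseurl', 'https://example.com/epel/')

before = dump(repofile)
repofile.set('epel', 'enabled', '1')
after = dump(repofile)

print(before != after)  # True -> the module would report changed=True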
diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py
index 5cf2f742f3c..837a7ef4774 100644
--- a/packaging/os/zypper.py
+++ b/packaging/os/zypper.py
@@ -26,14 +26,22 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+from xml.dom.minidom import parseString as parseXML
import re
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: zypper
author:
- "Patrick Callahan (@dirtyharrycallahan)"
- "Alexander Gubin (@alxgu)"
+ - "Thomas O'Donnell (@andytom)"
+ - "Robin Roth (@robinro)"
+ - "Andrii Radyk (@AnderEnder)"
version_added: "1.2"
short_description: Manage packages on SUSE and openSUSE
description:
@@ -41,7 +49,10 @@
options:
name:
description:
- - package name or package specifier with version C(name) or C(name-1.0). You can also pass a url or a local path to a rpm file.
+ - Package name C(name) or package specifier.
+ - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to update the package within the version range given.
+ - You can also pass a url or a local path to a rpm file.
+ - When using state=latest, this can be '*', which updates all installed packages.
required: true
aliases: [ 'pkg' ]
state:
@@ -56,7 +67,7 @@
description:
- The type of package to be operated on.
required: false
- choices: [ package, patch, pattern, product, srcpackage ]
+ choices: [ package, patch, pattern, product, srcpackage, application ]
default: "package"
version_added: "2.0"
disable_gpg_check:
@@ -67,198 +78,343 @@
required: false
default: "no"
choices: [ "yes", "no" ]
- aliases: []
disable_recommends:
version_added: "1.8"
description:
- - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does install recommended packages.
+ - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does install recommended packages.
required: false
default: "yes"
choices: [ "yes", "no" ]
+ force:
+ version_added: "2.2"
+ description:
+ - Adds C(--force) option to I(zypper). Allows to downgrade packages and change vendor or architecture.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ update_cache:
+ version_added: "2.2"
+ description:
+ - Run the equivalent of C(zypper refresh) before the operation.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ aliases: [ "refresh" ]
+ oldpackage:
+ version_added: "2.2"
+ description:
+ - Adds C(--oldpackage) option to I(zypper). Allows to downgrade packages with less side-effects than force. This is implied as soon as a version is specified as part of the package name.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
-notes: []
# informational: requirements for nodes
-requirements: [ zypper, rpm ]
-author: Patrick Callahan
+requirements:
+ - "zypper >= 1.0 # included in openSuSE >= 11.1 or SuSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+ - rpm
'''
EXAMPLES = '''
# Install "nmap"
-- zypper: name=nmap state=present
+- zypper:
+ name: nmap
+ state: present
# Install apache2 with recommended packages
-- zypper: name=apache2 state=present disable_recommends=no
+- zypper:
+ name: apache2
+ state: present
+ disable_recommends: no
+
+# Apply a given patch
+- zypper:
+ name: openSUSE-2016-128
+ state: present
+ type: patch
# Remove the "nmap" package
-- zypper: name=nmap state=absent
+- zypper:
+ name: nmap
+ state: absent
# Install the nginx rpm from a remote repo
-- zypper: name=http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm state=present
+- zypper:
+ name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm'
+ state: present
# Install local rpm file
-- zypper: name=/tmp/fancy-software.rpm state=present
+- zypper:
+ name: /tmp/fancy-software.rpm
+ state: present
+
+# Update all packages
+- zypper:
+ name: '*'
+ state: latest
+
+# Apply all available patches
+- zypper:
+ name: '*'
+ state: latest
+ type: patch
+
+# Refresh repositories and update package "openssl"
+- zypper:
+ name: openssl
+ state: present
+ update_cache: yes
+
+# Install specific version (possible comparisons: <, >, <=, >=, =)
+- zypper:
+ name: 'docker>=1.10'
+ state: installed
'''
-# Function used for getting zypper version
-def zypper_version(module):
- """Return (rc, message) tuple"""
- cmd = ['/usr/bin/zypper', '-V']
- rc, stdout, stderr = module.run_command(cmd, check_rc=False)
- if rc == 0:
- return rc, stdout
- else:
- return rc, stderr
-# Function used for getting versions of currently installed packages.
-def get_current_version(m, packages):
- cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n']
- cmd.extend(packages)
+def split_name_version(name):
+ """splits of the package name and desired version
- rc, stdout, stderr = m.run_command(cmd, check_rc=False)
+ example formats:
+ - docker>=1.10
+ - apache=2.4
+
+ Allowed version specifiers: <, >, <=, >=, =
+ Allowed version format: [0-9.-]*
+
+ Also allows a prefix indicating remove "-", "~" or install "+"
+ """
+
+ prefix = ''
+ if name[0] in ['-', '~', '+']:
+ prefix = name[0]
+ name = name[1:]
- current_version = {}
- rpmoutput_re = re.compile('^(\S+) (\S+)$')
-
- for stdoutline in stdout.splitlines():
- match = rpmoutput_re.match(stdoutline)
- if match == None:
- return None
- package = match.group(1)
- version = match.group(2)
- current_version[package] = version
-
- for package in packages:
- if package not in current_version:
- print package + ' was not returned by rpm \n'
- return None
-
- return current_version
-
-
-# Function used to find out if a package is currently installed.
-def get_package_state(m, packages):
- for i in range(0, len(packages)):
- # Check state of a local rpm-file
- if ".rpm" in packages[i]:
- # Check if rpm file is available
- package = packages[i]
- if not os.path.isfile(package) and not '://' in package:
- stderr = "No Package file matching '%s' found on system" % package
- m.fail_json(msg=stderr)
- # Get packagename from rpm file
- cmd = ['/bin/rpm', '--query', '--qf', '%{NAME}', '--package']
- cmd.append(package)
- rc, stdout, stderr = m.run_command(cmd, check_rc=False)
- packages[i] = stdout
-
- cmd = ['/bin/rpm', '--query', '--qf', 'package %{NAME} is installed\n']
+ version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')
+ try:
+ reres = version_check.match(name)
+ name, version = reres.groups()
+ return prefix, name, version
+ except AttributeError:
+ return prefix, name, None
+
+
+def get_want_state(m, names, remove=False):
+ packages_install = {}
+ packages_remove = {}
+ urls = []
+ for name in names:
+ if '://' in name or name.endswith('.rpm'):
+ urls.append(name)
+ else:
+ prefix, pname, version = split_name_version(name)
+ if prefix in ['-', '~']:
+ packages_remove[pname] = version
+ elif prefix == '+':
+ packages_install[pname] = version
+ else:
+ if remove:
+ packages_remove[pname] = version
+ else:
+ packages_install[pname] = version
+ return packages_install, packages_remove, urls
+
+
+def get_installed_state(m, packages):
+ "get installed state of packages"
+
+ cmd = get_cmd(m, 'search')
+ cmd.extend(['--match-exact', '--details', '--installed-only'])
cmd.extend(packages)
+ return parse_zypper_xml(m, cmd, fail_not_found=False)[0]
+
+def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
rc, stdout, stderr = m.run_command(cmd, check_rc=False)
- installed_state = {}
- rpmoutput_re = re.compile('^package (\S+) (.*)$')
- for stdoutline in stdout.splitlines():
- match = rpmoutput_re.match(stdoutline)
- if match == None:
- return None
- package = match.group(1)
- result = match.group(2)
- if result == 'is installed':
- installed_state[package] = True
+ dom = parseXML(stdout)
+ if rc == 104:
+ # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found)
+ if fail_not_found:
+ errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data
+ m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
else:
- installed_state[package] = False
-
- for package in packages:
- if package not in installed_state:
- print package + ' was not returned by rpm \n'
- return None
-
- return installed_state
-
-# Function used to make sure a package is present.
-def package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper):
- packages = []
- for package in name:
- if installed_state[package] is False:
- packages.append(package)
- if len(packages) != 0:
- cmd = ['/usr/bin/zypper', '--non-interactive']
- # add global options before zypper command
- if disable_gpg_check:
- cmd.append('--no-gpg-checks')
- cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type])
- # add install parameter
- if disable_recommends and not old_zypper:
- cmd.append('--no-recommends')
- cmd.extend(packages)
- rc, stdout, stderr = m.run_command(cmd, check_rc=False)
+ return {}, rc, stdout, stderr
+ elif rc in [0, 106, 103]:
+ # zypper exit codes
+ # 0: success
+ # 106: signature verification failed
+ # 103: zypper was upgraded, run same command again
+ if packages is None:
+ firstrun = True
+ packages = {}
+ solvable_list = dom.getElementsByTagName('solvable')
+ for solvable in solvable_list:
+ name = solvable.getAttribute('name')
+ packages[name] = {}
+ packages[name]['version'] = solvable.getAttribute('edition')
+ packages[name]['oldversion'] = solvable.getAttribute('edition-old')
+ status = solvable.getAttribute('status')
+ packages[name]['installed'] = status == "installed"
+ packages[name]['group'] = solvable.parentNode.nodeName
+ if rc == 103 and firstrun:
+ # if this was the first run and it failed with 103
+ # run zypper again with the same command to complete update
+ return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages)
+
+ return packages, rc, stdout, stderr
+ m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
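+
+# For orientation, the rough shape of the --xmlout data parse_zypper_xml()
+# walks (inferred from the attributes read above, not verbatim zypper output):
+#   <to-upgrade>
+#     <solvable name="openssl" edition="1.0.2" edition-old="1.0.1"
+#               status="installed"/>
+#   </to-upgrade>
+# i.e. the parent node name becomes the package 'group'.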
+
+
+def get_cmd(m, subcommand):
+ "puts together the basic zypper command arguments with those passed to the module"
+ is_install = subcommand in ['install', 'update', 'patch']
+ is_refresh = subcommand == 'refresh'
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout']
+
+ # add global options before zypper command
+ if (is_install or is_refresh) and m.params['disable_gpg_check']:
+ cmd.append('--no-gpg-checks')
- if rc == 0:
- changed=True
- else:
- changed=False
+ cmd.append(subcommand)
+ if subcommand != 'patch' and not is_refresh:
+ cmd.extend(['--type', m.params['type']])
+ if m.check_mode and subcommand != 'search':
+ cmd.append('--dry-run')
+ if is_install:
+ cmd.append('--auto-agree-with-licenses')
+ if m.params['disable_recommends']:
+ cmd.append('--no-recommends')
+ if m.params['force']:
+ cmd.append('--force')
+ if m.params['oldpackage']:
+ cmd.append('--oldpackage')
+ return cmd
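+
+# For illustration, with the module defaults (type=package, disable_recommends=yes,
+# force=no, oldpackage=no, no check mode), get_cmd(m, 'install') composes:
+#   ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout',
+#    'install', '--type', 'package', '--auto-agree-with-licenses', '--no-recommends']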
+
+
+def set_diff(m, retvals, result):
+ # TODO: if there is only one package, set before/after to version numbers
+ packages = {'installed': [], 'removed': [], 'upgraded': []}
+ if result:
+ for p in result:
+ group = result[p]['group']
+ if group == 'to-upgrade':
+ versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')'
+ packages['upgraded'].append(p + versions)
+ elif group == 'to-install':
+ packages['installed'].append(p)
+ elif group == 'to-remove':
+ packages['removed'].append(p)
+
+ output = ''
+ for state in packages:
+ if packages[state]:
+ output += state + ': ' + ', '.join(packages[state]) + '\n'
+ if 'diff' not in retvals:
+ retvals['diff'] = {}
+ if 'prepared' not in retvals['diff']:
+ retvals['diff']['prepared'] = output
else:
- rc = 0
- stdout = ''
- stderr = ''
- changed=False
+ retvals['diff']['prepared'] += '\n' + output
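+
+# The 'prepared' diff text assembled above looks something like:
+#   installed: nmap
+#   upgraded: openssl (1.0.1 => 1.0.2)
+# with one line per non-empty state and package names comma-separated.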
+
+
+def package_present(m, name, want_latest):
+ "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ name_install, name_remove, urls = get_want_state(m, name)
+
+ # if a version string is given, pass it to zypper
+ install_version = [p+name_install[p] for p in name_install if name_install[p]]
+ remove_version = [p+name_remove[p] for p in name_remove if name_remove[p]]
+
+ # add oldpackage flag when a version is given to allow downgrades
+ if install_version or remove_version:
+ m.params['oldpackage'] = True
+
+ if not want_latest:
+ # for state=present: filter out already installed packages
+ install_and_remove = name_install.copy()
+ install_and_remove.update(name_remove)
+ prerun_state = get_installed_state(m, install_and_remove)
+ # generate lists of packages to install or remove
+ name_install = [p for p in name_install if p not in prerun_state]
+ name_remove = [p for p in name_remove if p in prerun_state]
+ if not any((name_install, name_remove, urls, install_version, remove_version)):
+ # nothing to install/remove and nothing to update
+ return None, retvals
+
+ # zypper install also updates packages
+ cmd = get_cmd(m, 'install')
+ cmd.append('--')
+ cmd.extend(urls)
+
+ # pass packages with version information
+ cmd.extend(install_version)
+ cmd.extend(['-%s' % p for p in remove_version])
+
+ # allow for + or - prefixes in install/remove lists
+ # do this in one zypper run to allow for dependency-resolution
+ # for example "-exim postfix" runs without removing packages depending on mailserver
+ cmd.extend(name_install)
+ cmd.extend(['-%s' % p for p in name_remove])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return result, retvals
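+
+# Example of the combined run described above: a hypothetical task with
+# name=['-exim', 'postfix'] ends up as a single zypper call,
+#   /usr/bin/zypper ... install ... -- postfix -exim
+# so the install and the removal are dependency-resolved together.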
+
+
+def package_update_all(m):
+ "run update or patch on all available packages"
+
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ if m.params['type'] == 'patch':
+ cmdname = 'patch'
+ else:
+ cmdname = 'update'
- return (rc, stdout, stderr, changed)
+ cmd = get_cmd(m, cmdname)
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
-# Function used to make sure a package is the latest available version.
-def package_latest(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper):
- # first of all, make sure all the packages are installed
- (rc, stdout, stderr, changed) = package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper)
+def package_absent(m, name):
+ "remove the packages in name"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ # Get package state
+ name_install, name_remove, urls = get_want_state(m, name, remove=True)
+ if name_install:
+ m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.")
+ if urls:
+ m.fail_json(msg="Can not remove via URL.")
+ if m.params['type'] == 'patch':
+ m.fail_json(msg="Can not remove patches.")
+ prerun_state = get_installed_state(m, name_remove)
+ remove_version = [p+name_remove[p] for p in name_remove if name_remove[p]]
+ name_remove = [p for p in name_remove if p in prerun_state]
+ if not name_remove and not remove_version:
+ return None, retvals
- # if we've already made a change, we don't have to check whether a version changed
- if not changed:
- pre_upgrade_versions = get_current_version(m, name)
+ cmd = get_cmd(m, 'remove')
+ cmd.extend(name_remove)
+ cmd.extend(remove_version)
- cmd = ['/usr/bin/zypper', '--non-interactive']
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
- if disable_gpg_check:
- cmd.append('--no-gpg-checks')
- if old_zypper:
- cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type])
- else:
- cmd.extend(['update', '--auto-agree-with-licenses', '-t', package_type])
+def repo_refresh(m):
+ "update the repositories"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
- cmd.extend(name)
- rc, stdout, stderr = m.run_command(cmd, check_rc=False)
+ cmd = get_cmd(m, 'refresh')
- # if we've already made a change, we don't have to check whether a version changed
- if not changed:
- post_upgrade_versions = get_current_version(m, name)
- if pre_upgrade_versions != post_upgrade_versions:
- changed = True
-
- return (rc, stdout, stderr, changed)
-
-# Function used to make sure a package is not installed.
-def package_absent(m, name, installed_state, package_type, old_zypper):
- packages = []
- for package in name:
- if installed_state[package] is True:
- packages.append(package)
- if len(packages) != 0:
- cmd = ['/usr/bin/zypper', '--non-interactive', 'remove', '-t', package_type]
- cmd.extend(packages)
- rc, stdout, stderr = m.run_command(cmd)
-
- if rc == 0:
- changed=True
- else:
- changed=False
- else:
- rc = 0
- stdout = ''
- stderr = ''
- changed=False
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
- return (rc, stdout, stderr, changed)
+ return retvals
# ===========================================
# Main control flow
@@ -268,57 +424,54 @@ def main():
argument_spec = dict(
name = dict(required=True, aliases=['pkg'], type='list'),
state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
- type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage']),
+ type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
disable_gpg_check = dict(required=False, default='no', type='bool'),
disable_recommends = dict(required=False, default='yes', type='bool'),
+ force = dict(required=False, default='no', type='bool'),
+ update_cache = dict(required=False, aliases=['refresh'], default='no', type='bool'),
+ oldpackage = dict(required=False, default='no', type='bool'),
),
- supports_check_mode = False
+ supports_check_mode = True
)
+ name = module.params['name']
+ state = module.params['state']
+ update_cache = module.params['update_cache']
- params = module.params
+ # remove empty strings from package list
+ name = filter(None, name)
- name = params['name']
- state = params['state']
- type_ = params['type']
- disable_gpg_check = params['disable_gpg_check']
- disable_recommends = params['disable_recommends']
+ # Refresh repositories
+ if update_cache:
+ retvals = repo_refresh(module)
- rc = 0
- stdout = ''
- stderr = ''
- result = {}
- result['name'] = name
- result['state'] = state
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper refresh run failed.", **retvals)
- rc, out = zypper_version(module)
- match = re.match(r'zypper\s+(\d+)\.(\d+)\.(\d+)', out)
- if not match or int(match.group(1)) > 0:
- old_zypper = False
+ # Perform requested action
+ if name == ['*'] and state == 'latest':
+ packages_changed, retvals = package_update_all(module)
else:
- old_zypper = True
+ if state in ['absent', 'removed']:
+ packages_changed, retvals = package_absent(module, name)
+ elif state in ['installed', 'present', 'latest']:
+ packages_changed, retvals = package_present(module, name, state == 'latest')
- # Get package state
- installed_state = get_package_state(module, name)
+ retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed)
- # Perform requested action
- if state in ['installed', 'present']:
- (rc, stdout, stderr, changed) = package_present(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper)
- elif state in ['absent', 'removed']:
- (rc, stdout, stderr, changed) = package_absent(module, name, installed_state, type_, old_zypper)
- elif state == 'latest':
- (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper)
-
- if rc != 0:
- if stderr:
- module.fail_json(msg=stderr)
- else:
- module.fail_json(msg=stdout)
+ if module._diff:
+ set_diff(module, retvals, packages_changed)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper run failed.", **retvals)
- result['changed'] = changed
+ if not retvals['changed']:
+ del retvals['stdout']
+ del retvals['stderr']
- module.exit_json(**result)
+ module.exit_json(name=name, state=state, update_cache=update_cache, **retvals)
# import module snippets
-from ansible.module_utils.basic import *
-main()
+from ansible.module_utils.basic import AnsibleModule
+if __name__ == "__main__":
+ main()
diff --git a/packaging/os/zypper_repository.py b/packaging/os/zypper_repository.py
index 446723ef042..187e5803674 100644
--- a/packaging/os/zypper_repository.py
+++ b/packaging/os/zypper_repository.py
@@ -20,6 +20,10 @@
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: zypper_repository
@@ -55,46 +59,114 @@
- Whether to disable GPG signature checking of
all packages. Has an effect only if state is
I(present).
+ - Needs zypper version >= 1.6.2.
required: false
default: "no"
choices: [ "yes", "no" ]
- aliases: []
- refresh:
+ autorefresh:
description:
- Enable autorefresh of the repository.
required: false
default: "yes"
choices: [ "yes", "no" ]
- aliases: []
-notes: []
-requirements: [ zypper ]
+ aliases: [ "refresh" ]
+ priority:
+ description:
+ - Set priority of repository. Packages will always be installed
+ from the repository with the smallest priority number.
+ - Needs zypper version >= 1.12.25.
+ required: false
+ version_added: "2.1"
+ overwrite_multiple:
+ description:
+ - Overwrite multiple repository entries, if repositories with both name and
+ URL already exist.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ version_added: "2.1"
+ auto_import_keys:
+ description:
+ - Automatically import the gpg signing key of the new or changed repository.
+ - Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent).
+ - Implies runrefresh.
+ required: false
+ default: "no"
+ choices: ["yes", "no"]
+ version_added: "2.2"
+ runrefresh:
+ description:
+ - Refresh the package list of the given repository.
+ - Can be used with repo=* to refresh all repositories.
+ required: false
+ default: "no"
+ choices: ["yes", "no"]
+ version_added: "2.2"
+ enabled:
+ description:
+ - Set repository to enabled (or disabled).
+ required: false
+ default: "yes"
+ choices: ["yes", "no"]
+ version_added: "2.2"
+
+
+requirements:
+ - "zypper >= 1.0 # included in openSuSE >= 11.1 or SuSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
'''
EXAMPLES = '''
# Add NVIDIA repository for graphics drivers
-- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=present
+- zypper_repository:
+ name: nvidia-repo
+ repo: 'ftp://download.nvidia.com/opensuse/12.2'
+ state: present
# Remove NVIDIA repository
-- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=absent
+- zypper_repository:
+ name: nvidia-repo
+ repo: 'ftp://download.nvidia.com/opensuse/12.2'
+ state: absent
# Add python development repository
-- zypper_repository: repo=http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo
+- zypper_repository:
+ repo: 'http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo'
+
+# Refresh all repos
+- zypper_repository:
+ repo: '*'
+ runrefresh: yes
+
+# Add a repo and add its gpg key
+- zypper_repository:
+ repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/'
+ auto_import_keys: yes
+
+# Force refresh of a repository
+- zypper_repository:
+ repo: 'http://my_internal_ci_repo/repo'
+ name: my_ci_repo
+ state: present
+ runrefresh: yes
'''
REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
-def zypper_version(module):
- """Return (rc, message) tuple"""
- cmd = ['/usr/bin/zypper', '-V']
- rc, stdout, stderr = module.run_command(cmd, check_rc=False)
- if rc == 0:
- return rc, stdout
- else:
- return rc, stderr
+from distutils.version import LooseVersion
+
+def _get_cmd(*args):
+ """Combines the non-interactive zypper command with arguments/subcommands"""
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive']
+ cmd.extend(args)
+
+ return cmd
+
def _parse_repos(module):
- """parses the output of zypper -x lr and returns a parse repo dictionary"""
- cmd = ['/usr/bin/zypper', '-x', 'lr']
+ """parses the output of zypper --xmlout repos and return a parse repo dictionary"""
+ cmd = _get_cmd('--xmlout', 'repos')
+
from xml.dom.minidom import parseString as parseXML
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
@@ -113,119 +185,129 @@ def _parse_repos(module):
elif rc == 6:
return []
else:
- d = { 'zypper_exit_code': rc }
- if stderr:
- d['stderr'] = stderr
- if stdout:
- d['stdout'] = stdout
- module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), **d)
-
-def _parse_repos_old(module):
- """parses the output of zypper sl and returns a parse repo dictionary"""
- cmd = ['/usr/bin/zypper', 'sl']
- repos = []
- rc, stdout, stderr = module.run_command(cmd, check_rc=True)
- for line in stdout.split('\n'):
- matched = re.search(r'\d+\s+\|\s+(?P\w+)\s+\|\s+(?P\w+)\s+\|\s+(?P\w+)\s+\|\s+(?P\w+)\s+\|\s+(?P.*)', line)
- if matched == None:
- continue
-
- m = matched.groupdict()
- m['alias']= m['name']
- m['priority'] = 100
- m['gpgcheck'] = 1
- repos.append(m)
-
- return repos
-
-def repo_exists(module, old_zypper, **kwargs):
-
- def repo_subset(realrepo, repocmp):
- for k in repocmp:
- if k not in realrepo:
- return False
-
- for k, v in realrepo.items():
- if k in repocmp:
- if v.rstrip("/") != repocmp[k].rstrip("/"):
- return False
- return True
-
- if old_zypper:
- repos = _parse_repos_old(module)
- else:
- repos = _parse_repos(module)
+ module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
- for repo in repos:
- if repo_subset(repo, kwargs):
+def _repo_changes(realrepo, repocmp):
+ "Check whether the 2 given repos have different settings."
+ for k in repocmp:
+ if repocmp[k] and k not in realrepo:
return True
+
+ for k, v in realrepo.items():
+ if k in repocmp and repocmp[k]:
+ valold = str(repocmp[k] or "")
+ valnew = v or ""
+ if k == "url":
+ valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
+ if valold != valnew:
+ return True
return False
+def repo_exists(module, repodata, overwrite_multiple):
+ """Check whether the repository already exists.
-def add_repo(module, repo, alias, description, disable_gpg_check, old_zypper, refresh):
- if old_zypper:
- cmd = ['/usr/bin/zypper', 'sa']
- else:
- cmd = ['/usr/bin/zypper', 'ar', '--check']
+ returns (exists, mod, old_repos)
+ exists: whether a matching (name, URL) repo exists
+ mod: whether there are changes compared to the existing repo
+ old_repos: list of matching repos
+ """
+ existing_repos = _parse_repos(module)
- if repo.startswith("file:/") and old_zypper:
- cmd.extend(['-t', 'Plaindir'])
- else:
- cmd.extend(['-t', 'plaindir'])
+ # look for repos that have matching alias or url to the one searched
+ repos = []
+ for kw in ['alias', 'url']:
+ name = repodata[kw]
+ for oldr in existing_repos:
+ if repodata[kw] == oldr[kw] and oldr not in repos:
+ repos.append(oldr)
+
+ if len(repos) == 0:
+ # Repo does not exist yet
+ return (False, False, None)
+ elif len(repos) == 1:
+ # Found an existing repo, look for changes
+ has_changes = _repo_changes(repos[0], repodata)
+ return (True, has_changes, repos)
+ elif len(repos) >= 2:
+ if overwrite_multiple:
+ # Found two repos and want to overwrite_multiple
+ return (True, True, repos)
+ else:
+ errmsg = 'More than one repo matched "%s": "%s".' % (name, repos)
+ errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten'
+ module.fail_json(msg=errmsg)
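+
+# The three outcomes of repo_exists(), for reference:
+#   (False, False, None)  -- no matching repo exists yet
+#   (True, False, repos)  -- one match with identical settings, nothing to do
+#   (True, True, repos)   -- match(es) that need to be re-added with new settings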
+
+
+def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
+ "Adds the repo, removes old repos before, that would conflict."
+ repo = repodata['url']
+ cmd = _get_cmd('addrepo', '--check')
+ if repodata['name']:
+ cmd.extend(['--name', repodata['name']])
+
+ # priority on addrepo available since 1.12.25
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336
+ if repodata['priority']:
+ if zypper_version >= LooseVersion('1.12.25'):
+ cmd.extend(['--priority', str(repodata['priority'])])
+ else:
+ warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.")
- if description:
- cmd.extend(['--name', description])
+ if repodata['enabled'] == '0':
+ cmd.append('--disable')
- if disable_gpg_check and not old_zypper:
- cmd.append('--no-gpgcheck')
+ # gpgcheck available since 1.6.2
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449
+ # the default changed in the past, so don't assume a default here and show warning for old zypper versions
+ if zypper_version >= LooseVersion('1.6.2'):
+ if repodata['gpgcheck'] == '1':
+ cmd.append('--gpgcheck')
+ else:
+ cmd.append('--no-gpgcheck')
+ else:
+ warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.")
- if refresh:
+ if repodata['autorefresh'] == '1':
cmd.append('--refresh')
cmd.append(repo)
if not repo.endswith('.repo'):
- cmd.append(alias)
+ cmd.append(repodata['alias'])
+
+ if old_repos is not None:
+ for oldrepo in old_repos:
+ remove_repo(module, oldrepo['url'])
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
- changed = rc == 0
- if rc == 0:
- changed = True
- elif 'already exists. Please use another alias' in stderr:
- changed = False
- else:
- #module.fail_json(msg=stderr if stderr else stdout)
- if stderr:
- module.fail_json(msg=stderr)
- else:
- module.fail_json(msg=stdout)
+ return rc, stdout, stderr
- return changed
+def remove_repo(module, repo):
+ "Removes the repo."
+ cmd = _get_cmd('removerepo', repo)
-def remove_repo(module, repo, alias, old_zypper):
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
- if old_zypper:
- cmd = ['/usr/bin/zypper', 'sd']
- else:
- cmd = ['/usr/bin/zypper', 'rr']
- if alias:
- cmd.append(alias)
- else:
- cmd.append(repo)
- rc, stdout, stderr = module.run_command(cmd, check_rc=True)
- changed = rc == 0
- return changed
+def get_zypper_version(module):
+ rc, stdout, stderr = module.run_command(['/usr/bin/zypper', '--version'])
+ if rc != 0 or not stdout.startswith('zypper '):
+ return LooseVersion('1.0')
+ return LooseVersion(stdout.split()[1])
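+
+# LooseVersion compares dotted versions component-wise, e.g.
+#   LooseVersion('1.12.25') > LooseVersion('1.6.2')  ->  True
+# which is what the feature gates in addmodify_repo() rely on.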
+def runrefreshrepo(module, auto_import_keys=False, shortname=None):
+ "Forces zypper to refresh repo metadata."
+ if auto_import_keys:
+ cmd = _get_cmd('--gpg-auto-import-keys', 'refresh', '--force')
+ else:
+ cmd = _get_cmd('refresh', '--force')
+ if shortname is not None:
+ cmd.extend(['-r', shortname])
-def fail_if_rc_is_null(module, rc, stdout, stderr):
- if rc != 0:
- #module.fail_json(msg=stderr if stderr else stdout)
- if stderr:
- module.fail_json(msg=stderr)
- else:
- module.fail_json(msg=stdout)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
def main():
@@ -234,64 +316,99 @@ def main():
name=dict(required=False),
repo=dict(required=False),
state=dict(choices=['present', 'absent'], default='present'),
+ runrefresh=dict(required=False, default='no', type='bool'),
description=dict(required=False),
- disable_gpg_check = dict(required=False, default='no', type='bool'),
- refresh = dict(required=False, default='yes', type='bool'),
+ disable_gpg_check = dict(required=False, default=False, type='bool'),
+ autorefresh = dict(required=False, default=True, type='bool', aliases=['refresh']),
+ priority = dict(required=False, type='int'),
+ enabled = dict(required=False, default=True, type='bool'),
+ overwrite_multiple = dict(required=False, default=False, type='bool'),
+ auto_import_keys = dict(required=False, default=False, type='bool'),
),
supports_check_mode=False,
+ required_one_of = [['state','runrefresh']],
)
repo = module.params['repo']
+ alias = module.params['name']
state = module.params['state']
- name = module.params['name']
- description = module.params['description']
- disable_gpg_check = module.params['disable_gpg_check']
- refresh = module.params['refresh']
+ overwrite_multiple = module.params['overwrite_multiple']
+ auto_import_keys = module.params['auto_import_keys']
+ runrefresh = module.params['runrefresh']
+
+ zypper_version = get_zypper_version(module)
+ warnings = [] # collect warning messages for final output
+
+ repodata = {
+ 'url': repo,
+ 'alias': alias,
+ 'name': module.params['description'],
+ 'priority': module.params['priority'],
+ }
+ # rewrite bools in the language that zypper lr -x provides for easier comparison
+ if module.params['enabled']:
+ repodata['enabled'] = '1'
+ else:
+ repodata['enabled'] = '0'
+ if module.params['disable_gpg_check']:
+ repodata['gpgcheck'] = '0'
+ else:
+ repodata['gpgcheck'] = '1'
+ if module.params['autorefresh']:
+ repodata['autorefresh'] = '1'
+ else:
+ repodata['autorefresh'] = '0'
def exit_unchanged():
- module.exit_json(changed=False, repo=repo, state=state, name=name)
-
- rc, out = zypper_version(module)
- match = re.match(r'zypper\s+(\d+)\.(\d+)\.(\d+)', out)
- if not match or int(match.group(1)) > 0:
- old_zypper = False
- else:
- old_zypper = True
+ module.exit_json(changed=False, repodata=repodata, state=state)
# Check run-time module parameters
+ if repo == '*' or alias == '*':
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys)
+ module.exit_json(changed=False, runrefresh=True)
+ else:
+ module.fail_json(msg='repo=* can only be used with the runrefresh option.')
+
if state == 'present' and not repo:
module.fail_json(msg='Module option state=present requires repo')
- if state == 'absent' and not repo and not name:
+ if state == 'absent' and not repo and not alias:
module.fail_json(msg='Alias or repo parameter required when state=absent')
if repo and repo.endswith('.repo'):
- if name:
- module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding repo files')
+ if alias:
+ module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files')
else:
- if not name and state == "present":
- module.fail_json(msg='Name required when adding non-repo files:')
+ if not alias and state == "present":
+ module.fail_json(msg='Name required when adding non-repo files.')
- if repo and repo.endswith('.repo'):
- exists = repo_exists(module, old_zypper, url=repo, alias=name)
- elif repo:
- exists = repo_exists(module, old_zypper, url=repo)
+ exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)
+
+ if repo:
+ shortname = repo
else:
- exists = repo_exists(module, old_zypper, alias=name)
+ shortname = alias
if state == 'present':
- if exists:
+ if exists and not mod:
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys, shortname)
exit_unchanged()
-
- changed = add_repo(module, repo, name, description, disable_gpg_check, old_zypper, refresh)
+ rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings)
+ if rc == 0 and (runrefresh or auto_import_keys):
+ runrefreshrepo(module, auto_import_keys, shortname)
elif state == 'absent':
if not exists:
exit_unchanged()
+ rc, stdout, stderr = remove_repo(module, shortname)
- changed = remove_repo(module, repo, name, old_zypper)
-
- module.exit_json(changed=changed, repo=repo, state=state)
+ if rc == 0:
+ module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings)
+ else:
+ module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/remote_management/__init__.py b/remote_management/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/remote_management/ipmi/__init__.py b/remote_management/ipmi/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/remote_management/ipmi/ipmi_boot.py b/remote_management/ipmi/ipmi_boot.py
new file mode 100644
index 00000000000..06281d4d46f
--- /dev/null
+++ b/remote_management/ipmi/ipmi_boot.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ command = None
+
+from ansible.module_utils.basic import *
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipmi_boot
+short_description: Management of the order of boot devices
+description:
+ - Use this module to manage the order of boot devices
+version_added: "2.2"
+options:
+ name:
+ description:
+ - Hostname or ip address of the BMC.
+ required: true
+ port:
+ description:
+ - Remote RMCP port.
+ required: false
+ type: int
+ default: 623
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ default: null
+ bootdev:
+ description:
+ - Set boot device to use on next reboot
+ required: true
+ choices:
+ - network -- Request network boot
+ - hd -- Boot from hard drive
+ - safe -- Boot from hard drive, requesting 'safe mode'
+ - optical -- boot from CD/DVD/BD drive
+ - setup -- Boot into setup utility
+ - default -- remove any IPMI directed boot device request
+ state:
+ description:
+ - Whether to ensure the configured boot device is the desired one.
+ default: present
+ choices:
+ - present -- Ensure the requested boot device is used next boot
+ - absent -- Ensure the requested boot device is not used next boot
+ persistent:
+ description:
+ - If set, ask that system firmware uses this device beyond next boot.
+ Be aware many systems do not honor this.
+ required: false
+ type: boolean
+ default: false
+ uefiboot:
+ description:
+ - If set, request UEFI boot explicitly.
+ Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option.
+ In practice, this flag not being set does not preclude UEFI boot on any system I've encountered.
+ required: false
+ type: boolean
+ default: false
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (gaifullinbf@gmail.com)"
+'''
+
+RETURN = '''
+bootdev:
+ description: The boot device name which will be used beyond next boot.
+ returned: success
+ type: string
+ sample: default
+persistent:
+ description: If True, system firmware will use this device beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+uefimode:
+ description: If True, system firmware will use UEFI boot explicitly beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+'''
+
+EXAMPLES = '''
+# Ensure bootdevice is HD.
+- ipmi_boot:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ bootdev: hd
+
+# Ensure bootdevice is not Network
+- ipmi_boot:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ bootdev: network
+ state: absent
+'''
+
+# ==================================================
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ bootdev=dict(required=True, choices=['network', 'hd', 'safe', 'optical', 'setup', 'default']),
+ persistent=dict(default=False, type='bool'),
+ uefiboot=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ if command is None:
+ module.fail_json(msg='the python pyghmi module is required')
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ bootdev = module.params['bootdev']
+ persistent = module.params['persistent']
+ uefiboot = module.params['uefiboot']
+ request = dict()
+
+ if state == 'absent' and bootdev == 'default':
+ module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.")
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+ current = ipmi_cmd.get_bootdev()
+ # uefimode may not be supported by the BMC, so use the desired value as default
+ current.setdefault('uefimode', uefiboot)
+ if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot):
+ request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent)
+ elif state == 'absent' and current['bootdev'] == bootdev:
+ request = dict(bootdev='default')
+ else:
+ module.exit_json(changed=False, **current)
+
+ if module.check_mode:
+ response = dict(bootdev=request['bootdev'])
+ else:
+ response = ipmi_cmd.set_bootdev(**request)
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ if 'persist' in request:
+ response['persistent'] = request['persist']
+ if 'uefiboot' in request:
+ response['uefimode'] = request['uefiboot']
+
+ module.exit_json(changed=True, **response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+if __name__ == '__main__':
+ main()
diff --git a/remote_management/ipmi/ipmi_power.py b/remote_management/ipmi/ipmi_power.py
new file mode 100644
index 00000000000..b661be4c535
--- /dev/null
+++ b/remote_management/ipmi/ipmi_power.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ command = None
+
+from ansible.module_utils.basic import *
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipmi_power
+short_description: Power management for machine
+description:
+ - Use this module for power management
+version_added: "2.2"
+options:
+ name:
+ description:
+ - Hostname or ip address of the BMC.
+ required: true
+ port:
+ description:
+ - Remote RMCP port.
+ required: false
+ type: int
+ default: 623
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ default: null
+ state:
+ description:
+ - Whether to ensure that the machine is in the desired power state.
+ required: true
+ choices:
+ - on -- Request system turn on
+ - off -- Request system turn off without waiting for OS to shutdown
+ - shutdown -- Have system request OS proper shutdown
+ - reset -- Request system reset without waiting for OS
+ - boot -- If system is off, then 'on', else 'reset'
+ timeout:
+ description:
+ - Maximum number of seconds before interrupt request.
+ required: false
+ type: int
+ default: 300
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (gaifullinbf@gmail.com)"
+'''
+
+RETURN = '''
+powerstate:
+ description: The current power state of the machine.
+ returned: success
+ type: string
+ sample: on
+'''
+
+EXAMPLES = '''
+# Ensure machine is powered on.
+- ipmi_power:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ state: on
+'''
+
+# ==================================================
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ state=dict(required=True, choices=['on', 'off', 'shutdown', 'reset', 'boot']),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(default=300, type='int'),
+ ),
+ supports_check_mode=True,
+ )
+
+ if command is None:
+ module.fail_json(msg='the python pyghmi module is required')
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+
+ current = ipmi_cmd.get_power()
+ if current['powerstate'] != state:
+ response = {'powerstate': state} if module.check_mode else ipmi_cmd.set_power(state, wait=timeout)
+ changed = True
+ else:
+ response = current
+ changed = False
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ module.exit_json(changed=changed, **response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+if __name__ == '__main__':
+ main()
diff --git a/source_control/bzr.py b/source_control/bzr.py
index 0fc6ac28584..f66c00abf82 100644
--- a/source_control/bzr.py
+++ b/source_control/bzr.py
@@ -19,6 +19,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = u'''
---
module: bzr
@@ -62,7 +66,10 @@
EXAMPLES = '''
# Example bzr checkout from Ansible Playbooks
-- bzr: name=bzr+ssh://foosball.example.org/path/to/branch dest=/srv/checkout version=22
+- bzr:
+ name: 'bzr+ssh://foosball.example.org/path/to/branch'
+ dest: /srv/checkout
+ version: 22
'''
import re
@@ -143,7 +150,7 @@ def switch_version(self):
def main():
module = AnsibleModule(
argument_spec = dict(
- dest=dict(required=True),
+ dest=dict(required=True, type='path'),
name=dict(required=True, aliases=['parent']),
version=dict(default='head'),
force=dict(default='no', type='bool'),
@@ -151,7 +158,7 @@ def main():
)
)
- dest = os.path.abspath(os.path.expanduser(module.params['dest']))
+ dest = module.params['dest']
parent = module.params['name']
version = module.params['version']
force = module.params['force']
@@ -196,4 +203,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/source_control/git_config.py b/source_control/git_config.py
new file mode 100644
index 00000000000..16f2457dd98
--- /dev/null
+++ b/source_control/git_config.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Marius Gedminas
+# (c) 2016, Matthew Gamble
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: git_config
+author:
+ - "Matthew Gamble"
+ - "Marius Gedminas"
+version_added: 2.1
+requirements: ['git']
+short_description: Read and write git configuration
+description:
+ - The M(git_config) module changes git configuration by invoking 'git config'.
+ This is needed if you don't want to use M(template) for the entire git
+ config file (e.g. because you need to change just C(user.email) in
+ /etc/.git/config). Solutions involving M(command) are cumbersome or
+ don't work correctly in check mode.
+options:
+ list_all:
+ description:
+ - List all settings (optionally limited to a given I(scope))
+ required: false
+ choices: [ "yes", "no" ]
+ default: no
+ name:
+ description:
+ - The name of the setting. If no value is supplied, the value will
+ be read from the config if it has been set.
+ required: false
+ default: null
+ repo:
+ description:
+ - Path to a git repository for reading and writing values from a
+ specific repo.
+ required: false
+ default: null
+ scope:
+ description:
+ - Specify which scope to read/set values from. This is required
+ when setting config values. If this is set to local, you must
+ also specify the repo parameter. It defaults to system only when
+ not using I(list_all)=yes.
+ required: false
+ choices: [ "local", "global", "system" ]
+ default: null
+ value:
+ description:
+ - When specifying the name of a single setting, supply a value to
+ set that setting to the given value.
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+# Set some settings in ~/.gitconfig
+- git_config:
+ name: alias.ci
+ scope: global
+ value: commit
+
+- git_config:
+ name: alias.st
+ scope: global
+ value: status
+
+# Or system-wide:
+- git_config:
+ name: alias.remotev
+ scope: system
+ value: remote -v
+
+- git_config:
+ name: core.editor
+ scope: global
+ value: vim
+
+# scope=system is the default
+- git_config:
+ name: alias.diffc
+ value: diff --cached
+
+- git_config:
+ name: color.ui
+ value: auto
+
+# Make etckeeper not complain when invoked by cron
+- git_config:
+ name: user.email
+ repo: /etc
+ scope: local
+ value: 'root@{{ ansible_fqdn }}'
+
+# Read individual values from git config
+- git_config:
+ name: alias.ci
+ scope: global
+
+# scope: system is also assumed when reading values, unless list_all=yes
+- git_config:
+ name: alias.diffc
+
+# Read all values from git config
+- git_config:
+ list_all: yes
+ scope: global
+
+# When list_all=yes and no scope is specified, you get configuration from all scopes
+- git_config:
+ list_all: yes
+
+# Specify a repository to include local settings
+- git_config:
+ list_all: yes
+ repo: /path/to/repo.git
+'''
+
+RETURN = '''
+---
+config_value:
+ description: When list_all=no and value is not set, a string containing the value of the setting in name
+ returned: success
+ type: string
+ sample: "vim"
+
+config_values:
+ description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings
+ returned: success
+ type: dictionary
+ sample:
+ core.editor: "vim"
+ color.ui: "auto"
+ alias.diffc: "diff --cached"
+ alias.remotev: "remote -v"
+'''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ list_all=dict(required=False, type='bool', default=False),
+ name=dict(type='str'),
+ repo=dict(type='path'),
+ scope=dict(required=False, type='str', choices=['local', 'global', 'system']),
+ value=dict(required=False)
+ ),
+ mutually_exclusive=[['list_all', 'name'], ['list_all', 'value']],
+ required_if=[('scope', 'local', ['repo'])],
+ required_one_of=[['list_all', 'name']],
+ supports_check_mode=True,
+ )
+ git_path = module.get_bin_path('git')
+ if not git_path:
+ module.fail_json(msg="Could not find git. Please ensure it is installed.")
+
+ params = module.params
+ # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
+ # Set the locale to C to ensure consistent messages.
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ if params['name']:
+ name = params['name']
+ else:
+ name = None
+
+ if params['scope']:
+ scope = params['scope']
+ elif params['list_all']:
+ scope = None
+ else:
+ scope = 'system'
+
+ if params['value']:
+ new_value = params['value']
+ else:
+ new_value = None
+
+ args = [git_path, "config", "--includes"]
+ if params['list_all']:
+ args.append('-l')
+ if scope:
+ args.append("--" + scope)
+ if name:
+ args.append(name)
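+
+ # For example (hypothetical values): name=alias.ci with scope=global yields
+ # git config --includes --global alias.ci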
+
+ if scope == 'local':
+ dir = params['repo']
+ elif params['list_all'] and params['repo']:
+ # Include local settings from a specific repo when listing all available settings
+ dir = params['repo']
+ else:
+ # Run from root directory to avoid accidentally picking up any local config settings
+ dir = "/"
+
+ (rc, out, err) = module.run_command(' '.join(args), cwd=dir)
+ if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
+ # This just means nothing has been set at the given scope
+ module.exit_json(changed=False, msg='', config_values={})
+ elif rc >= 2:
+ # If the return code is 1, it just means the option hasn't been set yet, which is fine.
+ module.fail_json(rc=rc, msg=err, cmd=' '.join(args))
+
+ if params['list_all']:
+ values = out.rstrip().splitlines()
+ config_values = {}
+ for value in values:
+ k, v = value.split('=', 1)
+ config_values[k] = v
+ module.exit_json(changed=False, msg='', config_values=config_values)
+ elif not new_value:
+ module.exit_json(changed=False, msg='', config_value=out.rstrip())
+ else:
+ old_value = out.rstrip()
+ if old_value == new_value:
+ module.exit_json(changed=False, msg="")
+
+ if not module.check_mode:
+ new_value_quoted = "'" + new_value + "'"
+ (rc, out, err) = module.run_command(' '.join(args + [new_value_quoted]), cwd=dir)
+ if err:
+ module.fail_json(rc=rc, msg=err, cmd=' '.join(args + [new_value_quoted]))
+ module.exit_json(
+ msg='setting changed',
+ diff=dict(
+ before_header=' '.join(args),
+ before=old_value + "\n",
+ after_header=' '.join(args),
+ after=new_value + "\n"
+ ),
+ changed=True
+ )
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/source_control/github_hooks.py b/source_control/github_hooks.py
index d75fcb1573d..ce76b503c23 100644
--- a/source_control/github_hooks.py
+++ b/source_control/github_hooks.py
@@ -18,9 +18,21 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-import json
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
import base64
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: github_hooks
@@ -49,7 +61,7 @@
description:
- This tells the githooks module what you want it to do.
required: true
- choices: [ "create", "cleanall" ]
+ choices: [ "create", "cleanall", "list", "clean504" ]
validate_certs:
description:
- If C(no), SSL certificates for the target repo will not be validated. This should only be used
@@ -69,10 +81,20 @@
EXAMPLES = '''
# Example creating a new service hook. It ignores duplicates.
-- github_hooks: action=create hookurl=http://11.111.111.111:2222 user={{ gituser }} oauthkey={{ oauthkey }} repo=https://api.github.com/repos/pcgentry/Github-Auto-Deploy
+- github_hooks:
+ action: create
+ hookurl: 'http://11.111.111.111:2222'
+ user: '{{ gituser }}'
+ oauthkey: '{{ oauthkey }}'
+ repo: 'https://api.github.com/repos/pcgentry/Github-Auto-Deploy'
# Cleaning all hooks for this repo that had an error on the last update. Since this works for all hooks in a repo it is probably best that this would be called from a handler.
-- local_action: github_hooks action=cleanall user={{ gituser }} oauthkey={{ oauthkey }} repo={{ repo }}
+- github_hooks:
+ action: cleanall
+ user: '{{ gituser }}'
+ oauthkey: '{{ oauthkey }}'
+ repo: '{{ repo }}'
+ delegate_to: localhost
'''
def _list(module, hookurl, oauthkey, repo, user):
@@ -144,9 +166,9 @@ def _delete(module, hookurl, oauthkey, repo, user, hookid):
def main():
module = AnsibleModule(
argument_spec=dict(
- action=dict(required=True),
+ action=dict(required=True, choices=['list','clean504','cleanall','create']),
hookurl=dict(required=False),
- oauthkey=dict(required=True),
+ oauthkey=dict(required=True, no_log=True),
repo=dict(required=True),
user=dict(required=True),
validate_certs=dict(default='yes', type='bool'),
@@ -183,4 +205,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/source_control/github_key.py b/source_control/github_key.py
new file mode 100644
index 00000000000..cc54734e004
--- /dev/null
+++ b/source_control/github_key.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: github_key
+short_description: Manage GitHub access keys.
+description:
+ - Creates, removes, or updates GitHub access keys.
+version_added: "2.2"
+options:
+ token:
+ description:
+ - GitHub Access Token with permission to list and create public keys.
+ required: true
+ name:
+ description:
+ - SSH key name
+ required: true
+ pubkey:
+ description:
+ - SSH public key value. Required when C(state=present).
+ required: false
+ default: none
+ state:
+ description:
+ - Whether to remove a key, ensure that it exists, or update its value.
+ choices: ['present', 'absent']
+ default: 'present'
+ required: false
+ force:
+ description:
+ - The default is C(yes), which will replace the existing remote key
+ if it's different than C(pubkey). If C(no), the key will only be
+ set if no key with the given C(name) exists.
+ required: false
+ choices: ['yes', 'no']
+ default: 'yes'
+
+author: Robert Estelle (@erydo)
+'''
+
+RETURN = '''
+deleted_keys:
+ description: An array of key objects that were deleted. Only present on state=absent
+ type: list
+ returned: When state=absent
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
+matching_keys:
+ description: An array of keys matching the specified name. Only present on state=present
+ type: list
+ returned: When state=present
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
+key:
+ description: Metadata about the key just created. Only present on state=present
+ type: dict
+ returned: success
+ sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}
+'''
+
+EXAMPLES = '''
+- name: Read SSH public key to authorize
+ shell: cat /home/foo/.ssh/id_rsa.pub
+ register: ssh_pub_key
+
+- name: Authorize key with GitHub
+ local_action:
+ module: github_key
+ name: Access Key for Some Machine
+ token: '{{ github_access_token }}'
+ pubkey: '{{ ssh_pub_key.stdout }}'
+'''
+
+
+import sys # noqa
+import json
+import re
+
+
+API_BASE = 'https://api.github.com'
+
+
+class GitHubResponse(object):
+ def __init__(self, response, info):
+ self.content = response.read()
+ self.info = info
+
+ def json(self):
+ return json.loads(self.content)
+
+ def links(self):
+ links = {}
+ if 'link' in self.info:
+ link_header = self.info['link']
+ matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header)
+ for url, rel in matches:
+ links[rel] = url
+ return links
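+
+ # Example: a GitHub pagination header such as
+ #   info['link'] = '<https://api.github.com/user/keys?page=2>; rel="next"'
+ # parses to {'next': 'https://api.github.com/user/keys?page=2'}.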
+
+
+class GitHubSession(object):
+ def __init__(self, module, token):
+ self.module = module
+ self.token = token
+
+ def request(self, method, url, data=None):
+ headers = {
+ 'Authorization': 'token %s' % self.token,
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/vnd.github.v3+json',
+ }
+ response, info = fetch_url(
+ self.module, url, method=method, data=data, headers=headers)
+ if not (200 <= info['status'] < 400):
+ self.module.fail_json(
+ msg=(" failed to send request %s to %s: %s"
+ % (method, url, info['msg'])))
+ return GitHubResponse(response, info)
+
+
+def get_all_keys(session):
+ url = API_BASE + '/user/keys'
+ while url:
+ r = session.request('GET', url)
+ for key in r.json():
+ yield key
+
+ url = r.links().get('next')
+
+
+def create_key(session, name, pubkey, check_mode):
+ if check_mode:
+ from datetime import datetime
+ now = datetime.utcnow()
+ return {
+ 'id': 0,
+ 'key': pubkey,
+ 'title': name,
+ 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY',
+ 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'),
+ 'read_only': False,
+ 'verified': False
+ }
+ else:
+ return session.request(
+ 'POST',
+ API_BASE + '/user/keys',
+ data=json.dumps({'title': name, 'key': pubkey})).json()
+
+
+def delete_keys(session, to_delete, check_mode):
+ if check_mode:
+ return
+
+ for key in to_delete:
+ session.request('DELETE', API_BASE + '/user/keys/%s' % key['id'])
+
+
+def ensure_key_absent(session, name, check_mode):
+ to_delete = [key for key in get_all_keys(session) if key['title'] == name]
+ delete_keys(session, to_delete, check_mode=check_mode)
+
+ return {'changed': bool(to_delete),
+ 'deleted_keys': to_delete}
+
+
+def ensure_key_present(session, name, pubkey, force, check_mode):
+ matching_keys = [k for k in get_all_keys(session) if k['title'] == name]
+ deleted_keys = []
+
+ if matching_keys and force and matching_keys[0]['key'] != pubkey:
+ delete_keys(session, matching_keys, check_mode=check_mode)
+ (deleted_keys, matching_keys) = (matching_keys, [])
+
+ if not matching_keys:
+ key = create_key(session, name, pubkey, check_mode=check_mode)
+ else:
+ key = matching_keys[0]
+
+ return {
+ 'changed': bool(deleted_keys or not matching_keys),
+ 'deleted_keys': deleted_keys,
+ 'matching_keys': matching_keys,
+ 'key': key
+ }
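+
+# Behaviour of ensure_key_present() when a key with the given name exists:
+#   same pubkey                 -> unchanged, existing key returned
+#   different pubkey, force=yes -> old key deleted, new key created (changed)
+#   different pubkey, force=no  -> existing key kept (unchanged)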
+
+
+def main():
+ argument_spec = {
+ 'token': {'required': True, 'no_log': True},
+ 'name': {'required': True},
+ 'pubkey': {},
+ 'state': {'choices': ['present', 'absent'], 'default': 'present'},
+ 'force': {'default': True, 'type': 'bool'},
+ }
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ token = module.params['token']
+ name = module.params['name']
+ state = module.params['state']
+ force = module.params['force']
+ pubkey = module.params.get('pubkey')
+
+ if pubkey:
+ pubkey_parts = pubkey.split(' ')
+ # Keys consist of a protocol, the key data, and an optional comment.
+ if len(pubkey_parts) < 2:
+ module.fail_json(msg='"pubkey" parameter has an invalid format')
+
+ # Strip out comment so we can compare to the keys GitHub returns.
+ pubkey = ' '.join(pubkey_parts[:2])
+ elif state == 'present':
+ module.fail_json(msg='"pubkey" is required when state=present')
+
+ session = GitHubSession(module, token)
+ if state == 'present':
+ result = ensure_key_present(session, name, pubkey, force=force,
+ check_mode=module.check_mode)
+ elif state == 'absent':
+ result = ensure_key_absent(session, name, check_mode=module.check_mode)
+
+ module.exit_json(**result)
+
+from ansible.module_utils.basic import * # noqa
+from ansible.module_utils.urls import * # noqa
+
+if __name__ == '__main__':
+ main()
diff --git a/source_control/github_release.py b/source_control/github_release.py
new file mode 100644
index 00000000000..ac59e6b69ae
--- /dev/null
+++ b/source_control/github_release.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: github_release
+short_description: Interact with GitHub Releases
+description:
+ - Fetch metadata about Github Releases
+version_added: 2.2
+options:
+ token:
+ required: true
+ description:
+ - Github Personal Access Token for authenticating
+ user:
+ required: true
+ description:
+ - The GitHub account that owns the repository
+ repo:
+ required: true
+ description:
+ - Repository name
+ action:
+ required: true
+ description:
+ - Action to perform
+ choices: [ 'latest_release' ]
+
+author:
+ - "Adrian Moisey (@adrianmoisey)"
+requirements:
+ - "github3.py >= 1.0.0a3"
+'''
+
+EXAMPLES = '''
+- name: Get latest release of test/test
+ github_release:
+ token: tokenabc1234567890
+ user: testuser
+ repo: testrepo
+ action: latest_release
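+
+# A hedged sketch: the module exits with a "tag" value on success, so the
+# result can be registered and reused ("latest" is an illustrative name).
+- name: Get latest release and remember the tag
+ github_release:
+ token: tokenabc1234567890
+ user: testuser
+ repo: testrepo
+ action: latest_release
+ register: latest
+
+- debug:
+ msg: "Latest tag is {{ latest.tag }}"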
+'''
+
+RETURN = '''
+latest_release:
+ description: Version of the latest release
+ type: string
+ returned: success
+ sample: 1.1.0
+'''
+
+try:
+ import github3
+
+ HAS_GITHUB_API = True
+except ImportError:
+ HAS_GITHUB_API = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(required=True),
+ user=dict(required=True),
+ token=dict(required=True, no_log=True),
+ action=dict(required=True, choices=['latest_release']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_GITHUB_API:
+ module.fail_json(msg='Missing required github3 module (check docs or install with: pip install github3.py)')
+
+ repo = module.params['repo']
+ user = module.params['user']
+ login_token = module.params['token']
+ action = module.params['action']
+
+ # login to github
+ try:
+ gh = github3.login(token=str(login_token))
+ # test if we're actually logged in
+ gh.me()
+ except github3.AuthenticationFailed:
+ e = get_exception()
+ module.fail_json(msg='Failed to connect to Github: %s' % e)
+
+ repository = gh.repository(str(user), str(repo))
+
+ if not repository:
+ module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo))
+
+ if action == 'latest_release':
+ release = repository.latest_release()
+ if release:
+ module.exit_json(tag=release.tag_name)
+ else:
+ module.exit_json(tag=None)
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/source_control/gitlab_group.py b/source_control/gitlab_group.py
new file mode 100644
index 00000000000..4c133028474
--- /dev/null
+++ b/source_control/gitlab_group.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: gitlab_group
+short_description: Creates/updates/deletes Gitlab Groups
+description:
+ - When the group does not exist in Gitlab, it will be created.
+ - When the group does exist and state=absent, the group will be deleted.
+version_added: "2.1"
+author: "Werner Dijkerman (@dj-wasabi)"
+requirements:
+ - pyapi-gitlab python module
+options:
+ server_url:
+ description:
+ - Url of Gitlab server, with protocol (http or https).
+ required: true
+ validate_certs:
+ description:
+ - Whether the SSL certificate should be verified when using https.
+ required: false
+ default: true
+ aliases:
+ - verify_ssl
+ login_user:
+ description:
+ - Gitlab user name.
+ required: false
+ default: null
+ login_password:
+ description:
+ - Gitlab password for login_user
+ required: false
+ default: null
+ login_token:
+ description:
+ - Gitlab token for logging in.
+ required: false
+ default: null
+ name:
+ description:
+ - Name of the group you want to create.
+ required: true
+ path:
+ description:
+ - The path of the group you want to create; this will be server_url/group_path.
+ - If not supplied, the group_name will be used.
+ required: false
+ default: null
+ state:
+ description:
+ - create or delete group.
+ - Possible values are present and absent.
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+'''
+
+EXAMPLES = '''
+- name: "Delete Gitlab Group"
+ local_action: gitlab_group
+ server_url="http://gitlab.dj-wasabi.local"
+ validate_certs=false
+ login_token="WnUzDsxjy8230-Dy_k"
+ name=my_first_group
+ state=absent
+
+- name: "Create Gitlab Group"
+ local_action: gitlab_group
+ server_url="https://gitlab.dj-wasabi.local"
+ validate_certs=true
+ login_user=dj-wasabi
+ login_password="MySecretPassword"
+ name=my_first_group
+ path=my_first_group
+ state=present
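+
+# A hedged sketch of the same task in YAML argument style; equivalent to the
+# key=value form above, just easier to read with many arguments.
+- name: "Create Gitlab Group"
+ gitlab_group:
+ server_url: "https://gitlab.dj-wasabi.local"
+ validate_certs: true
+ login_token: "WnUzDsxjy8230-Dy_k"
+ name: my_first_group
+ state: present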
+'''
+
+RETURN = '''# '''
+
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except ImportError:
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+class GitLabGroup(object):
+ def __init__(self, module, git):
+ self._module = module
+ self._gitlab = git
+
+ def createGroup(self, group_name, group_path):
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ return self._gitlab.creategroup(group_name, group_path)
+
+ def deleteGroup(self, group_name):
+ is_group_empty = True
+ group_id = self.idGroup(group_name)
+
+ for project in self._gitlab.getall(self._gitlab.getprojects):
+ owner = project['namespace']['name']
+ if owner == group_name:
+ is_group_empty = False
+
+ if is_group_empty:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ return self._gitlab.deletegroup(group_id)
+ else:
+ self._module.fail_json(msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.")
+
+ def existsGroup(self, group_name):
+ for group in self._gitlab.getall(self._gitlab.getgroups):
+ if group['name'] == group_name:
+ return True
+ return False
+
+ def idGroup(self, group_name):
+ for group in self._gitlab.getall(self._gitlab.getgroups):
+ if group['name'] == group_name:
+ return group['id']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(required=True),
+ validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
+ login_user=dict(required=False, no_log=True),
+ login_password=dict(required=False, no_log=True),
+ login_token=dict(required=False, no_log=True),
+ name=dict(required=True),
+ path=dict(required=False),
+ state=dict(default="present", choices=["present", "absent"]),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg="Missing requried gitlab module (check docs or install with: pip install pyapi-gitlab")
+
+ server_url = module.params['server_url']
+ verify_ssl = module.params['validate_certs']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_token = module.params['login_token']
+ group_name = module.params['name']
+ group_path = module.params['path']
+ state = module.params['state']
+
+ # We need both login_user and login_password or login_token, otherwise we fail.
+ if login_user is not None and login_password is not None:
+ use_credentials = True
+ elif login_token is not None:
+ use_credentials = False
+ else:
+ module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
+
+ # Set group_path to group_name if it is empty.
+ if group_path is None:
+ group_path = group_name.replace(" ", "_")
+
+ # Let's make a connection to the Gitlab server_url, with either login_user and login_password
+ # or with login_token
+ try:
+ if use_credentials:
+ git = gitlab.Gitlab(host=server_url)
+ git.login(user=login_user, password=login_password)
+ else:
+ git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)
+
+ # Validate if group exists and take action based on "state"
+ group = GitLabGroup(module, git)
+ group_name = group_name.lower()
+ group_exists = group.existsGroup(group_name)
+
+ if group_exists and state == "absent":
+ group.deleteGroup(group_name)
+ module.exit_json(changed=True, result="Successfully deleted group %s" % group_name)
+ else:
+ if state == "absent":
+ module.exit_json(changed=False, result="Group deleted or does not exists")
+ else:
+ if group_exists:
+ module.exit_json(changed=False)
+ else:
+ if group.createGroup(group_name, group_path):
+ module.exit_json(changed=True, result="Successfully created or updated the group %s" % group_name)
+
+
+
+
+if __name__ == '__main__':
+ main()
diff --git a/source_control/gitlab_project.py b/source_control/gitlab_project.py
new file mode 100644
index 00000000000..94852afac86
--- /dev/null
+++ b/source_control/gitlab_project.py
@@ -0,0 +1,405 @@
+#!/usr/bin/python
+# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: gitlab_project
+short_description: Creates/updates/deletes Gitlab Projects
+description:
+ - When the project does not exist in Gitlab, it will be created.
+ - When the project does exist and state=absent, the project will be deleted.
+ - When changes are made to the project, the project will be updated.
+version_added: "2.1"
+author: "Werner Dijkerman (@dj-wasabi)"
+requirements:
+ - pyapi-gitlab python module
+options:
+ server_url:
+ description:
+ - Url of Gitlab server, with protocol (http or https).
+ required: true
+ validate_certs:
+ description:
+ - Whether the SSL certificate should be verified when using https.
+ required: false
+ default: true
+ aliases:
+ - verify_ssl
+ login_user:
+ description:
+ - Gitlab user name.
+ required: false
+ default: null
+ login_password:
+ description:
+ - Gitlab password for login_user
+ required: false
+ default: null
+ login_token:
+ description:
+ - Gitlab token for logging in.
+ required: false
+ default: null
+ group:
+ description:
+ - The name of the group to which this project belongs.
+ - When not provided, the project will belong to the user configured via 'login_user' or 'login_token'.
+ - When provided with a username, the project will be created for that user; 'login_user' or 'login_token' needs admin rights.
+ required: false
+ default: null
+ name:
+ description:
+ - The name of the project
+ required: true
+ path:
+ description:
+ - The path of the project you want to create; this will be server_url/<group>/path.
+ - If not supplied, name will be used.
+ required: false
+ default: null
+ description:
+ description:
+ - A description for the project.
+ required: false
+ default: null
+ issues_enabled:
+ description:
+ - Whether you want to create issues or not.
+ - Possible values are true and false.
+ required: false
+ default: true
+ merge_requests_enabled:
+ description:
+ - If merge requests can be made or not.
+ - Possible values are true and false.
+ required: false
+ default: true
+ wiki_enabled:
+ description:
+ - If a wiki for this project should be available or not.
+ - Possible values are true and false.
+ required: false
+ default: true
+ snippets_enabled:
+ description:
+ - If creating snippets should be available or not.
+ - Possible values are true and false.
+ required: false
+ default: true
+ public:
+ description:
+ - If the project is public available or not.
+ - Setting this to true is the same as setting visibility_level to 20.
+ - Possible values are true and false.
+ required: false
+ default: false
+ visibility_level:
+ description:
+ - Private. visibility_level is 0. Project access must be granted explicitly for each user.
+ - Internal. visibility_level is 10. The project can be cloned by any logged in user.
+ - Public. visibility_level is 20. The project can be cloned without any authentication.
+ - Possible values are 0, 10 and 20.
+ required: false
+ default: 0
+ import_url:
+ description:
+ - Git repository which will be imported into gitlab.
+ - Gitlab server needs read access to this git repository.
+ required: false
+ default: null
+ state:
+ description:
+ - create or delete project.
+ - Possible values are present and absent.
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+'''
+
+EXAMPLES = '''
+- name: "Delete Gitlab Project"
+ local_action: gitlab_project
+ server_url="http://gitlab.dj-wasabi.local"
+ validate_certs=false
+ login_token="WnUzDsxjy8230-Dy_k"
+ name=my_first_project
+ state=absent
+
+- name: "Create Gitlab Project in group Ansible"
+ local_action: gitlab_project
+ server_url="https://gitlab.dj-wasabi.local"
+ validate_certs=true
+ login_user=dj-wasabi
+ login_password="MySecretPassword"
+ name=my_first_project
+ group=ansible
+ issues_enabled=false
+ wiki_enabled=true
+ snippets_enabled=true
+ import_url="http://git.example.com/example/lab.git"
+ state=present
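+
+# A hedged sketch: re-running with a changed argument (here the description)
+# updates the existing project instead of creating a new one.
+- name: "Update Gitlab Project description"
+ local_action: gitlab_project
+ server_url="https://gitlab.dj-wasabi.local"
+ login_token="WnUzDsxjy8230-Dy_k"
+ name=my_first_project
+ description="Provisioned by Ansible"
+ state=present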
+'''
+
+RETURN = '''# '''
+
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except ImportError:
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+
+class GitLabProject(object):
+ def __init__(self, module, git):
+ self._module = module
+ self._gitlab = git
+
+ def createOrUpdateProject(self, project_exists, group_name, import_url, arguments):
+ is_user = False
+ group_id = self.getGroupId(group_name)
+ if not group_id:
+ group_id = self.getUserId(group_name)
+ is_user = True
+
+ if project_exists:
+ # Edit project
+ return self.updateProject(group_name, arguments)
+ else:
+ # Create project
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ return self.createProject(is_user, group_id, import_url, arguments)
+
+ def createProject(self, is_user, user_id, import_url, arguments):
+ if is_user:
+ return self._gitlab.createprojectuser(user_id=user_id, import_url=import_url, **arguments)
+ else:
+ group_id = user_id
+ return self._gitlab.createproject(namespace_id=group_id, import_url=import_url, **arguments)
+
+ def deleteProject(self, group_name, project_name):
+ if self.existsGroup(group_name):
+ project_owner = group_name
+ else:
+ project_owner = self._gitlab.currentuser()['username']
+
+ search_results = self._gitlab.searchproject(search=project_name)
+ for result in search_results:
+ owner = result['namespace']['name']
+ if owner == project_owner:
+ return self._gitlab.deleteproject(result['id'])
+
+ def existsProject(self, group_name, project_name):
+ if self.existsGroup(group_name):
+ project_owner = group_name
+ else:
+ project_owner = self._gitlab.currentuser()['username']
+
+ search_results = self._gitlab.searchproject(search=project_name)
+ for result in search_results:
+ owner = result['namespace']['name']
+ if owner == project_owner:
+ return True
+ return False
+
+ def existsGroup(self, group_name):
+ if group_name is not None:
+ # Find the group, if group not exists we try for user
+ for group in self._gitlab.getall(self._gitlab.getgroups):
+ if group['name'] == group_name:
+ return True
+
+ user_name = group_name
+ user_data = self._gitlab.getusers(search=user_name)
+ for data in user_data:
+ if 'id' in data:
+ return True
+ return False
+
+ def getGroupId(self, group_name):
+ if group_name is not None:
+ # Find the group, if group not exists we try for user
+ for group in self._gitlab.getall(self._gitlab.getgroups):
+ if group['name'] == group_name:
+ return group['id']
+
+ def getProjectId(self, group_name, project_name):
+ if self.existsGroup(group_name):
+ project_owner = group_name
+ else:
+ project_owner = self._gitlab.currentuser()['username']
+
+ search_results = self._gitlab.searchproject(search=project_name)
+ for result in search_results:
+ owner = result['namespace']['name']
+ if owner == project_owner:
+ return result['id']
+
+ def getUserId(self, user_name):
+ user_data = self._gitlab.getusers(search=user_name)
+
+ for data in user_data:
+ if 'id' in data:
+ return data['id']
+ return self._gitlab.currentuser()['id']
+
+ def to_bool(self, value):
+ if value:
+ return 1
+ else:
+ return 0
+
+ def updateProject(self, group_name, arguments):
+ project_changed = False
+ project_name = arguments['name']
+ project_id = self.getProjectId(group_name, project_name)
+ project_data = self._gitlab.getproject(project_id=project_id)
+
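+ # pyapi-gitlab returns booleans (or None) for these fields while we
+ # send 0/1, so normalize before comparing to avoid false "changed".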
+ for arg_key, arg_value in arguments.items():
+ project_data_value = project_data[arg_key]
+
+ if isinstance(project_data_value, bool) or project_data_value is None:
+ to_bool = self.to_bool(project_data_value)
+ if to_bool != arg_value:
+ project_changed = True
+ continue
+ else:
+ if project_data_value != arg_value:
+ project_changed = True
+
+ if project_changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ return self._gitlab.editproject(project_id=project_id, **arguments)
+ else:
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(required=True),
+ validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
+ login_user=dict(required=False, no_log=True),
+ login_password=dict(required=False, no_log=True),
+ login_token=dict(required=False, no_log=True),
+ group=dict(required=False),
+ name=dict(required=True),
+ path=dict(required=False),
+ description=dict(required=False),
+ issues_enabled=dict(default=True, type='bool'),
+ merge_requests_enabled=dict(default=True, type='bool'),
+ wiki_enabled=dict(default=True, type='bool'),
+ snippets_enabled=dict(default=True, type='bool'),
+ public=dict(default=False, type='bool'),
+ visibility_level=dict(default="0", choices=["0", "10", "20"]),
+ import_url=dict(required=False),
+ state=dict(default="present", choices=["present", 'absent']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab")
+
+ server_url = module.params['server_url']
+ verify_ssl = module.params['validate_certs']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_token = module.params['login_token']
+ group_name = module.params['group']
+ project_name = module.params['name']
+ project_path = module.params['path']
+ description = module.params['description']
+ issues_enabled = module.params['issues_enabled']
+ merge_requests_enabled = module.params['merge_requests_enabled']
+ wiki_enabled = module.params['wiki_enabled']
+ snippets_enabled = module.params['snippets_enabled']
+ public = module.params['public']
+ visibility_level = module.params['visibility_level']
+ import_url = module.params['import_url']
+ state = module.params['state']
+
+ # We need both login_user and login_password or login_token, otherwise we fail.
+ if login_user is not None and login_password is not None:
+ use_credentials = True
+ elif login_token is not None:
+ use_credentials = False
+ else:
+ module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
+
+ # Set project_path to project_name if it is empty.
+ if project_path is None:
+ project_path = project_name.replace(" ", "_")
+
+ # The Gitlab API makes no difference between upper and lower case, so we lowercase them.
+ project_name = project_name.lower()
+ project_path = project_path.lower()
+ if group_name is not None:
+ group_name = group_name.lower()
+
+ # Let's make a connection to the Gitlab server_url, with either login_user and login_password
+ # or with login_token
+ try:
+ if use_credentials:
+ git = gitlab.Gitlab(host=server_url, verify_ssl=verify_ssl)
+ git.login(user=login_user, password=login_password)
+ else:
+ git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)
+
+ # Validate if project exists and take action based on "state"
+ project = GitLabProject(module, git)
+ project_exists = project.existsProject(group_name, project_name)
+
+ # Creating the project dict
+ arguments = {"name": project_name,
+ "path": project_path,
+ "description": description,
+ "issues_enabled": project.to_bool(issues_enabled),
+ "merge_requests_enabled": project.to_bool(merge_requests_enabled),
+ "wiki_enabled": project.to_bool(wiki_enabled),
+ "snippets_enabled": project.to_bool(snippets_enabled),
+ "public": project.to_bool(public),
+ "visibility_level": int(visibility_level)}
+
+ if project_exists and state == "absent":
+ project.deleteProject(group_name, project_name)
+ module.exit_json(changed=True, result="Successfully deleted project %s" % project_name)
+ else:
+ if state == "absent":
+ module.exit_json(changed=False, result="Project deleted or does not exists")
+ else:
+ if project.createOrUpdateProject(project_exists, group_name, import_url, arguments):
+ module.exit_json(changed=True, result="Successfully created or updated the project %s" % project_name)
+ else:
+ module.exit_json(changed=False)
+
+
+
+if __name__ == '__main__':
+ main()
diff --git a/source_control/gitlab_user.py b/source_control/gitlab_user.py
new file mode 100644
index 00000000000..e289d70e2c0
--- /dev/null
+++ b/source_control/gitlab_user.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: gitlab_user
+short_description: Creates/updates/deletes Gitlab Users
+description:
+ - When the user does not exist in Gitlab, it will be created.
+ - When the user does exist and state=absent, the user will be deleted.
+ - When changes are made to the user, the user will be updated.
+version_added: "2.1"
+author: "Werner Dijkerman (@dj-wasabi)"
+requirements:
+ - pyapi-gitlab python module
+options:
+ server_url:
+ description:
+ - Url of Gitlab server, with protocol (http or https).
+ required: true
+ validate_certs:
+ description:
+ - Whether the SSL certificate should be verified when using https.
+ required: false
+ default: true
+ aliases:
+ - verify_ssl
+ login_user:
+ description:
+ - Gitlab user name.
+ required: false
+ default: null
+ login_password:
+ description:
+ - Gitlab password for login_user
+ required: false
+ default: null
+ login_token:
+ description:
+ - Gitlab token for logging in.
+ required: false
+ default: null
+ name:
+ description:
+ - Name of the user you want to create
+ required: true
+ username:
+ description:
+ - The username of the user.
+ required: true
+ password:
+ description:
+ - The password of the user.
+ required: true
+ email:
+ description:
+ - The email that belongs to the user.
+ required: true
+ sshkey_name:
+ description:
+ - The name of the sshkey
+ required: false
+ default: null
+ sshkey_file:
+ description:
+ - The ssh key itself.
+ required: false
+ default: null
+ group:
+ description:
+ - Add user as a member to this group.
+ required: false
+ default: null
+ access_level:
+ description:
+ - The access level to the group. One of the following can be used.
+ - guest
+ - reporter
+ - developer
+ - master
+ - owner
+ required: false
+ default: null
+ state:
+ description:
+ - create or delete user.
+ - Possible values are present and absent.
+ required: false
+ default: present
+ choices: ["present", "absent"]
+'''
+
+EXAMPLES = '''
+- name: "Delete Gitlab User"
+ local_action: gitlab_user
+ server_url="http://gitlab.dj-wasabi.local"
+ validate_certs=false
+ login_token="WnUzDsxjy8230-Dy_k"
+ username=myusername
+ state=absent
+
+- name: "Create Gitlab User"
+ local_action: gitlab_user
+ server_url="https://gitlab.dj-wasabi.local"
+ validate_certs=true
+ login_user=dj-wasabi
+ login_password="MySecretPassword"
+ name="My Name"
+ username=myusername
+ password=mysecretpassword
+ email=me@home.com
+ sshkey_name=MySSH
+ sshkey_file="ssh-rsa AAAAB3NzaC1yc..."
+ state=present
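+
+# A hedged sketch: group and access_level are optional; together they add
+# the user to an existing group with the given role.
+- name: "Create Gitlab User as developer in group ansible"
+ local_action: gitlab_user
+ server_url="https://gitlab.dj-wasabi.local"
+ login_token="WnUzDsxjy8230-Dy_k"
+ name="My Name"
+ username=myusername
+ password=mysecretpassword
+ email=me@home.com
+ group=ansible
+ access_level=developer
+ state=present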
+'''
+
+RETURN = '''# '''
+
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except ImportError:
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.basic import *
+
+
+class GitLabUser(object):
+ def __init__(self, module, git):
+ self._module = module
+ self._gitlab = git
+
+ def addToGroup(self, group_id, user_id, access_level):
+ if access_level == "guest":
+ level = 10
+ elif access_level == "reporter":
+ level = 20
+ elif access_level == "developer":
+ level = 30
+ elif access_level == "master":
+ level = 40
+ elif access_level == "owner":
+ level = 50
+ return self._gitlab.addgroupmember(group_id, user_id, level)
+
+ def createOrUpdateUser(self, user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level):
+ group_id = ''
+ arguments = {"name": user_name,
+ "username": user_username,
+ "email": user_email}
+
+ if group_name is not None:
+ if self.existsGroup(group_name):
+ group_id = self.getGroupId(group_name)
+
+ if self.existsUser(user_username):
+ self.updateUser(group_id, user_sshkey_name, user_sshkey_file, access_level, arguments)
+ else:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self.createUser(group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments)
+
+ def createUser(self, group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments):
+ user_changed = False
+
+ # Create the user
+ user_username = arguments['username']
+ user_name = arguments['name']
+ user_email = arguments['email']
+ if self._gitlab.createuser(password=user_password, **arguments):
+ user_id = self.getUserId(user_username)
+ if self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
+ user_changed = True
+ # Add the user to the group if group_id is not empty
+ if group_id != '':
+ if self.addToGroup(group_id, user_id, access_level):
+ user_changed = True
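+ # Creating the user itself is a change, regardless of the optional
+ # ssh key and group steps above.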
+ user_changed = True
+
+ # Exit with change to true or false
+ if user_changed:
+ self._module.exit_json(changed=True, result="Created the user")
+ else:
+ self._module.exit_json(changed=False)
+
+ def deleteUser(self, user_username):
+ user_id = self.getUserId(user_username)
+
+ if self._gitlab.deleteuser(user_id):
+ self._module.exit_json(changed=True, result="Successfully deleted user %s" % user_username)
+ else:
+ self._module.exit_json(changed=False, result="User %s already deleted or something went wrong" % user_username)
+
+ def existsGroup(self, group_name):
+ for group in self._gitlab.getall(self._gitlab.getgroups):
+ if group['name'] == group_name:
+ return True
+ return False
+
+ def existsUser(self, username):
+ found_user = self._gitlab.getusers(search=username)
+ for user in found_user:
+ if user['id'] != '':
+ return True
+ return False
+
+ def getGroupId(self, group_name):
+ for group in self._gitlab.getall(self._gitlab.getgroups):
+ if group['name'] == group_name:
+ return group['id']
+
+ def getUserId(self, username):
+ found_user = self._gitlab.getusers(search=username)
+ for user in found_user:
+ if user['id'] != '':
+ return user['id']
+
+ def updateUser(self, group_id, user_sshkey_name, user_sshkey_file, access_level, arguments):
+ user_changed = False
+ user_username = arguments['username']
+ user_id = self.getUserId(user_username)
+ user_data = self._gitlab.getuser(user_id=user_id)
+
+ # Lets check if we need to update the user
+ for arg_key, arg_value in arguments.items():
+ if user_data[arg_key] != arg_value:
+ user_changed = True
+
+ if user_changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._gitlab.edituser(user_id=user_id, **arguments)
+ user_changed = True
+ if self._module.check_mode or self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
+ user_changed = True
+ if group_id != '':
+ if self._module.check_mode or self.addToGroup(group_id, user_id, access_level):
+ user_changed = True
+ if user_changed:
+ self._module.exit_json(changed=True, result="The user %s is updated" % user_username)
+ else:
+ self._module.exit_json(changed=False, result="The user %s is already up2date" % user_username)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(required=True),
+ validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
+ login_user=dict(required=False, no_log=True),
+ login_password=dict(required=False, no_log=True),
+ login_token=dict(required=False, no_log=True),
+ name=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ email=dict(required=True),
+ sshkey_name=dict(required=False),
+ sshkey_file=dict(required=False),
+ group=dict(required=False),
+ access_level=dict(required=False, choices=["guest", "reporter", "developer", "master", "owner"]),
+ state=dict(default="present", choices=["present", "absent"]),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab")
+
+ server_url = module.params['server_url']
+ verify_ssl = module.params['validate_certs']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_token = module.params['login_token']
+ user_name = module.params['name']
+ user_username = module.params['username']
+ user_password = module.params['password']
+ user_email = module.params['email']
+ user_sshkey_name = module.params['sshkey_name']
+ user_sshkey_file = module.params['sshkey_file']
+ group_name = module.params['group']
+ access_level = module.params['access_level']
+ state = module.params['state']
+
+ # We need both login_user and login_password or login_token, otherwise we fail.
+ if login_user is not None and login_password is not None:
+ use_credentials = True
+ elif login_token is not None:
+ use_credentials = False
+ else:
+ module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
+
+ # Check if vars are none
+ if user_sshkey_file is not None and user_sshkey_name is not None:
+ use_sshkey = True
+ else:
+ use_sshkey = False
+
+ if group_name is not None and access_level is not None:
+ add_to_group = True
+ group_name = group_name.lower()
+ else:
+ add_to_group = False
+
+ user_username = user_username.lower()
+
+ # Let's make a connection to the Gitlab server_url, with either login_user and login_password
+ # or with login_token
+ try:
+ if use_credentials:
+ git = gitlab.Gitlab(host=server_url)
+ git.login(user=login_user, password=login_password)
+ else:
+ git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)
+
+ # Validate if group exists and take action based on "state"
+ user = GitLabUser(module, git)
+
+ # Check if user exists, if not exists and state = absent, we exit nicely.
+ if not user.existsUser(user_username) and state == "absent":
+ module.exit_json(changed=False, result="User already deleted or does not exists")
+ else:
+ # User exists,
+ if state == "absent":
+ user.deleteUser(user_username)
+ else:
+ user.createOrUpdateUser(user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level)
+
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/__init__.py b/storage/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/storage/netapp/README.md b/storage/netapp/README.md
new file mode 100644
index 00000000000..8d5ab2fd4cf
--- /dev/null
+++ b/storage/netapp/README.md
@@ -0,0 +1,454 @@
+# NetApp Storage Modules
+This directory contains modules that support the storage platforms in the NetApp portfolio.
+
+## SANtricity Modules
+The modules prefixed with *netapp\_e* are built to support the SANtricity storage platform. They require the SANtricity
+WebServices Proxy. The WebServices Proxy is free software available at the [NetApp Software Download site](http://mysupport.netapp.com/NOW/download/software/eseries_webservices/1.40.X000.0009/).
+Starting with the E2800 platform (11.30 OS), the modules can work directly with the storage array, since REST API
+requests are handled on the array itself. Such arrays can still be managed by proxy for large scale deployments.
+The modules provide idempotent provisioning for volume groups, disk pools, standard volumes, thin volumes, LUN mapping,
+hosts, host groups (clusters), volume snapshots, consistency groups, and asynchronous mirroring.
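+
+All of the *netapp\_e* modules share the same connection arguments (`api_url`, `api_username`, `api_password`, the
+target `ssid`, and optionally `validate_certs`). The sketch below shows a minimal task; the host name and credentials
+are illustrative.
+```yml
+- name: Gather facts from one array
+ netapp_e_facts:
+ ssid: ansible1
+ api_url: https://proxy.example.com/devmgr/v2
+ api_username: admin
+ api_password: secret
+ validate_certs: false
+```
+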
+### Prerequisites
+| Software | Version |
+| -------- |:-------:|
+| SANtricity Web Services Proxy* | 1.4 or 2.0 |
+| Ansible | 2.2** |
+
+\* Not required for *E2800 with 11.30 OS*
+\*\* The modules were developed with this version. Ansible forward and backward compatibility applies.
+
+### Questions and Contribution
+Please feel free to submit pull requests with improvements. Issues for these modules should be routed to @hulquest, but
+we also try to keep an eye on the list for issues specific to these modules. General questions can be directed to our [development team](mailto:ng-hsg-engcustomer-esolutions-support@netapp.com).
+
+### Examples
+These examples are not comprehensive but are intended to help you get started when integrating storage provisioning into
+your playbooks.
+```yml
+- name: NetApp Test All Modules
+ hosts: proxy20
+ gather_facts: yes
+ connection: local
+ vars:
+ storage_systems:
+ ansible1:
+ address1: "10.251.230.41"
+ address2: "10.251.230.42"
+ ansible2:
+ address1: "10.251.230.43"
+ address2: "10.251.230.44"
+ ansible3:
+ address1: "10.251.230.45"
+ address2: "10.251.230.46"
+ ansible4:
+ address1: "10.251.230.47"
+ address2: "10.251.230.48"
+ storage_pools:
+ Disk_Pool_1:
+ raid_level: raidDiskPool
+ criteria_drive_count: 11
+ Disk_Pool_2:
+ raid_level: raidDiskPool
+ criteria_drive_count: 11
+ Disk_Pool_3:
+ raid_level: raid0
+ criteria_drive_count: 2
+ volumes:
+ vol_1:
+ storage_pool_name: Disk_Pool_1
+ size: 10
+ thin_provision: false
+ thin_volume_repo_size: 7
+ vol_2:
+ storage_pool_name: Disk_Pool_2
+ size: 10
+ thin_provision: false
+ thin_volume_repo_size: 7
+ vol_3:
+ storage_pool_name: Disk_Pool_3
+ size: 10
+ thin_provision: false
+ thin_volume_repo_size: 7
+ thin_vol_1:
+ storage_pool_name: Disk_Pool_1
+ size: 10
+ thin_provision: true
+ thin_volume_repo_size: 7
+ hosts:
+ ANSIBLE-1:
+ host_type: 1
+ index: 1
+ ports:
+ - type: 'fc'
+ label: 'fpPort1'
+ port: '2100000E1E191B01'
+
+ netapp_api_host: 10.251.230.29
+ netapp_api_url: http://{{ netapp_api_host }}/devmgr/v2
+ netapp_api_username: rw
+ netapp_api_password: rw
+ ssid: ansible1
+ auth: no
+ lun_mapping: no
+ netapp_api_validate_certs: False
+ snapshot: no
+ gather_facts: no
+ amg_create: no
+ remove_volume: no
+ make_volume: no
+ check_thins: no
+ remove_storage_pool: yes
+ check_storage_pool: yes
+ remove_storage_system: no
+ check_storage_system: yes
+ change_role: no
+ flash_cache: False
+ configure_hostgroup: no
+ configure_async_mirror: False
+ configure_snapshot: no
+ copy_volume: False
+ volume_copy_source_volume_id:
+ volume_destination_source_volume_id:
+ snapshot_volume_storage_pool_name: Disk_Pool_3
+ snapshot_volume_image_id: 3400000060080E5000299B640063074057BC5C5E
+ snapshot_volume: no
+ snapshot_volume_name: vol_1_snap_vol
+ host_type_index: 1
+ host_name: ANSIBLE-1
+ set_host: no
+ remove_host: no
+ amg_member_target_array:
+ amg_member_primary_pool:
+ amg_member_secondary_pool:
+ amg_member_primary_volume:
+ amg_member_secondary_volume:
+ set_amg_member: False
+ amg_array_name: foo
+ amg_name: amg_made_by_ansible
+ amg_secondaryArrayId: ansible2
+ amg_sync_name: foo
+ amg_sync: no
+
+ tasks:
+
+ - name: Get array facts
+ netapp_e_facts:
+ ssid: "{{ item.key }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ with_dict: "{{ storage_systems }}"
+ when: gather_facts
+
+ - name: Presence of storage system
+ netapp_e_storage_system:
+ ssid: "{{ item.key }}"
+ state: present
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ controller_addresses:
+ - "{{ item.value.address1 }}"
+ - "{{ item.value.address2 }}"
+ with_dict: "{{ storage_systems }}"
+ when: check_storage_system
+
+ - name: Create Snapshot
+ netapp_e_snapshot_images:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ snapshot_group: "ansible_snapshot_group"
+ state: 'create'
+ when: snapshot
+
+ - name: Auth Module Example
+ netapp_e_auth:
+ ssid: "{{ ssid }}"
+ current_password: 'Infinit2'
+ new_password: 'Infinit1'
+ set_admin: yes
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ when: auth
+
+ - name: No disk groups
+ netapp_e_storagepool:
+ ssid: "{{ ssid }}"
+ name: "{{ item }}"
+ state: absent
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ remove_volumes: yes
+ with_items:
+ - Disk_Pool_1
+ - Disk_Pool_2
+ - Disk_Pool_3
+ when: remove_storage_pool
+
+ - name: Make disk groups
+ netapp_e_storagepool:
+ ssid: "{{ ssid }}"
+ name: "{{ item.key }}"
+ state: present
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ raid_level: "{{ item.value.raid_level }}"
+ criteria_drive_count: "{{ item.value.criteria_drive_count }}"
+ with_dict: " {{ storage_pools }}"
+ when: check_storage_pool
+
+ - name: No thin volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ name: NewThinVolumeByAnsible
+ state: absent
+ thin_provision: yes
+ log_path: /tmp/volume.log
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ when: check_thins
+
+ - name: Make a thin volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ name: NewThinVolumeByAnsible
+ state: present
+ thin_provision: yes
+ thin_volume_repo_size: 7
+ size: 10
+ log_path: /tmp/volume.log
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ storage_pool_name: Disk_Pool_1
+ when: check_thins
+
+ - name: Remove standard/thick volumes
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ name: "{{ item.key }}"
+ state: absent
+ log_path: /tmp/volume.log
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ with_dict: "{{ volumes }}"
+ when: remove_volume
+
+ - name: Make a volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ name: "{{ item.key }}"
+ state: present
+ storage_pool_name: "{{ item.value.storage_pool_name }}"
+ size: "{{ item.value.size }}"
+ thin_provision: "{{ item.value.thin_provision }}"
+ thin_volume_repo_size: "{{ item.value.thin_volume_repo_size }}"
+ log_path: /tmp/volume.log
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ with_dict: "{{ volumes }}"
+ when: make_volume
+
+ - name: No storage system
+ netapp_e_storage_system:
+ ssid: "{{ item.key }}"
+ state: absent
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ with_dict: "{{ storage_systems }}"
+ when: remove_storage_system
+
+ - name: Update the role of a storage array
+ netapp_e_amg_role:
+ name: "{{ amg_name }}"
+ role: primary
+ force: true
+ noSync: true
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ when: change_role
+
+ - name: Flash Cache
+ netapp_e_flashcache:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ name: SSDCacheBuiltByAnsible
+ when: flash_cache
+
+ - name: Configure Hostgroup
+ netapp_e_hostgroup:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: absent
+ name: "ansible-host-group"
+ when: configure_hostgroup
+
+ - name: Configure Snapshot group
+ netapp_e_snapshot_group:
+ ssid: "{{ ssid }}"
+ state: present
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ base_volume_name: vol_3
+ name: ansible_snapshot_group
+ repo_pct: 20
+ warning_threshold: 85
+ delete_limit: 30
+ full_policy: purgepit
+ storage_pool_name: Disk_Pool_3
+ rollback_priority: medium
+ when: configure_snapshot
+
+ - name: Copy volume
+ netapp_e_volume_copy:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ status: present
+ source_volume_id: "{{ volume_copy_source_volume_id }}"
+ destination_volume_id: "{{ volume_destination_source_volume_id }}"
+ when: copy_volume
+
+ - name: Snapshot volume
+ netapp_e_snapshot_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ state: present
+ storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
+ snapshot_image_id: "{{ snapshot_volume_image_id }}"
+ name: "{{ snapshot_volume_name }}"
+ when: snapshot_volume
+
+ - name: Remove hosts
+ netapp_e_host:
+ ssid: "{{ ssid }}"
+ state: absent
+ name: "{{ item.key }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ host_type_index: "{{ host_type_index }}"
+ with_dict: "{{hosts}}"
+ when: remove_host
+
+ - name: Ensure/add hosts
+ netapp_e_host:
+ ssid: "{{ ssid }}"
+ state: present
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ name: "{{ item.key }}"
+ host_type_index: "{{ item.value.index }}"
+ ports:
+ - type: 'fc'
+ label: 'fpPort1'
+ port: '2100000E1E191B01'
+ with_dict: "{{hosts}}"
+ when: set_host
+
+ - name: Unmap a volume
+ netapp_e_lun_mapping:
+ state: absent
+ ssid: "{{ ssid }}"
+ lun: 2
+ target: "{{ host_name }}"
+ volume_name: "thin_vol_1"
+ target_type: host
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ when: lun_mapping
+
+ - name: Map a volume
+ netapp_e_lun_mapping:
+ state: present
+ ssid: "{{ ssid }}"
+ lun: 16
+ target: "{{ host_name }}"
+ volume_name: "thin_vol_1"
+ target_type: host
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ when: lun_mapping
+
+ - name: Update LUN Id
+ netapp_e_lun_mapping:
+ state: present
+ ssid: "{{ ssid }}"
+ lun: 2
+ target: "{{ host_name }}"
+ volume_name: "thin_vol_1"
+ target_type: host
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ when: lun_mapping
+
+ - name: AMG removal
+ netapp_e_amg:
+ state: absent
+ ssid: "{{ ssid }}"
+ secondaryArrayId: "{{amg_secondaryArrayId}}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ new_name: "{{amg_array_name}}"
+ name: "{{amg_name}}"
+ when: amg_create
+
+ - name: AMG create
+ netapp_e_amg:
+ state: present
+ ssid: "{{ ssid }}"
+ secondaryArrayId: "{{amg_secondaryArrayId}}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ new_name: "{{amg_array_name}}"
+ name: "{{amg_name}}"
+ when: amg_create
+
+ - name: start AMG async
+ netapp_e_amg_sync:
+ name: "{{ amg_name }}"
+ state: running
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ when: amg_sync
+```
diff --git a/storage/netapp/__init__.py b/storage/netapp/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/storage/netapp/netapp_e_amg.py b/storage/netapp/netapp_e_amg.py
new file mode 100644
index 00000000000..e5f60b29454
--- /dev/null
+++ b/storage/netapp/netapp_e_amg.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_amg
+short_description: Create, Remove, and Update Asynchronous Mirror Groups
+description:
+ - Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ name:
+ description:
+ - The name of the async mirror group you wish to target, or create.
+ - If C(state) is present and the name isn't found, it will attempt to create.
+ required: yes
+ secondaryArrayId:
+ description:
+ - The ID of the secondary array to be used in the mirroring process
+ required: yes
+ syncIntervalMinutes:
+ description:
+ - The synchronization interval in minutes
+ required: no
+ default: 10
+ manualSync:
+ description:
+ - Setting this to true will cause other synchronization values to be ignored
+ required: no
+ default: no
+ recoveryWarnThresholdMinutes:
+ description:
+ - Recovery point warning threshold (minutes). The user will be warned when the age of the last good recovery point exceeds this value
+ required: no
+ default: 20
+ repoUtilizationWarnThreshold:
+ description:
+ - Repository utilization warning threshold (percent)
+ required: no
+ default: 80
+ interfaceType:
+ description:
+ - The intended protocol to use if both Fibre and iSCSI are available.
+ choices:
+ - iscsi
+ - fibre
+ required: no
+ default: null
+ syncWarnThresholdMinutes:
+ description:
+ - The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete.
+ required: no
+ default: 10
+ ssid:
+ description:
+ - The ID of the primary storage array for the async mirror action
+ required: yes
+ state:
+ description:
+ - A C(state) of present will either create or update the async mirror group.
+ - A C(state) of absent will remove the async mirror group.
+ required: yes
+"""
+
+EXAMPLES = """
+ - name: AMG removal
+ netapp_e_amg:
+ state: absent
+ ssid: "{{ ssid }}"
+ secondaryArrayId: "{{amg_secondaryArrayId}}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ new_name: "{{amg_array_name}}"
+ name: "{{amg_name}}"
+ when: amg_create
+
+ - name: AMG create
+ netapp_e_amg:
+ state: present
+ ssid: "{{ ssid }}"
+ secondaryArrayId: "{{amg_secondaryArrayId}}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ new_name: "{{amg_array_name}}"
+ name: "{{amg_name}}"
+ when: amg_create
+"""
+
+RETURN = """
+msg:
+ description: Success message. On removal this is simply "Async mirror group removed."; on creation or update the resulting AMG definition is returned.
+ returned: success
+ type: string
+ sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}'
+"""
+
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=False, ignore_errors=False):
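+ """Thin wrapper around open_url returning (status_code, parsed_json).
+
+ HTTP error responses are still read and parsed; status codes >= 400
+ raise unless ignore_errors is set.
+ """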
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def has_match(module, ssid, api_url, api_pwd, api_usr, body):
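+ # Fetch all async mirror groups on the array; if one matches the requested
+ # label, compare its tunable settings with the desired spec so the caller
+ # can act idempotently.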
+ compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
+ 'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
+ desired_state = dict((x, (body.get(x))) for x in compare_keys)
+ label_exists = False
+ matches_spec = False
+ current_state = None
+ async_id = None
+ api_data = None
+ desired_name = body.get('name')
+ endpoint = 'storage-systems/%s/async-mirrors' % ssid
+ url = api_url + endpoint
+ try:
+ rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS)
+ except Exception:
+ error = get_exception()
+ module.exit_json(exception="Error finding a match. Message: %s" % str(error))
+
+ for async_group in data:
+ if async_group['label'] == desired_name:
+ label_exists = True
+ api_data = async_group
+ async_id = async_group['groupRef']
+ current_state = dict(
+ syncIntervalMinutes=async_group['syncIntervalMinutes'],
+ syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'],
+ recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'],
+ repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'],
+ )
+
+ if current_state == desired_state:
+ matches_spec = True
+
+ return label_exists, matches_spec, api_data, async_id
+
+
+def create_async(module, ssid, api_url, api_pwd, api_usr, body):
+ endpoint = 'storage-systems/%s/async-mirrors' % ssid
+ url = api_url + endpoint
+ post_data = json.dumps(body)
+ try:
+ rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd,
+ headers=HEADERS)
+ except Exception:
+ error = get_exception()
+ module.exit_json(exception="Exception while creating aysnc mirror group. Message: %s" % str(error))
+ return data
+
+
+def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id):
+ endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
+ url = api_url + endpoint
+ compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
+ 'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
+ desired_state = dict((x, (body.get(x))) for x in compare_keys)
+
+ if new_name:
+ desired_state['new_name'] = new_name
+
+ post_data = json.dumps(desired_state)
+
+ try:
+ rc, data = request(url, data=post_data, method='POST', headers=HEADERS,
+ url_username=user, url_password=pwd)
+ except Exception:
+ error = get_exception()
+ module.exit_json(exception="Exception while updating async mirror group. Message: %s" % str(error))
+
+ return data
+
+
+def remove_amg(module, ssid, api_url, pwd, user, async_id):
+ endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
+ url = api_url + endpoint
+ try:
+ rc, data = request(url, method='DELETE', url_username=user, url_password=pwd,
+ headers=HEADERS)
+ except Exception:
+ error = get_exception()
+ module.exit_json(exception="Exception while removing async mirror group. Message: %s" % str(error))
+
+ return
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ name=dict(required=True, type='str'),
+ new_name=dict(required=False, type='str'),
+ secondaryArrayId=dict(required=True, type='str'),
+ syncIntervalMinutes=dict(required=False, default=10, type='int'),
+ manualSync=dict(required=False, default=False, type='bool'),
+ recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'),
+ repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'),
+ interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'),
+ ssid=dict(required=True, type='str'),
+ state=dict(required=True, choices=['present', 'absent']),
+ syncWarnThresholdMinutes=dict(required=False, default=10, type='int')
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ p = module.params
+
+ ssid = p.pop('ssid')
+ api_url = p.pop('api_url')
+ user = p.pop('api_username')
+ pwd = p.pop('api_password')
+ new_name = p.pop('new_name')
+ state = p.pop('state')
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p)
+
+ if state == 'present':
+ if name_exists and spec_matches:
+ module.exit_json(changed=False, msg="Desired state met", **api_data)
+ elif name_exists and not spec_matches:
+ results = update_async(module, ssid, api_url, pwd, user,
+ p, new_name, async_id)
+ module.exit_json(changed=True,
+ msg="Async mirror group updated", async_id=async_id,
+ **results)
+ elif not name_exists:
+ results = create_async(module, ssid, api_url, pwd, user, p)
+ module.exit_json(changed=True, **results)
+
+ elif state == 'absent':
+ if name_exists:
+ remove_amg(module, ssid, api_url, pwd, user, async_id)
+ module.exit_json(changed=True, msg="Async mirror group removed.",
+ async_id=async_id)
+ else:
+ module.exit_json(changed=False,
+ msg="Async Mirror group: %s already absent" % p['name'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_amg_role.py b/storage/netapp/netapp_e_amg_role.py
new file mode 100644
index 00000000000..bfe3c4b8334
--- /dev/null
+++ b/storage/netapp/netapp_e_amg_role.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_amg_role
+short_description: Update the role of a storage array within an Asynchronous Mirror Group (AMG).
+description:
+ - Update a storage array to become the primary or secondary instance in an asynchronous mirror group
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ description:
+ - The ID of the primary storage array for the async mirror action
+ required: yes
+ role:
+ description:
+ - Whether the array should be the primary or secondary array for the AMG
+ required: yes
+ choices: ['primary', 'secondary']
+ noSync:
+ description:
+ - Whether to avoid synchronization prior to role reversal
+ required: no
+ default: no
+ choices: [yes, no]
+ force:
+ description:
+ - Whether to force the role reversal regardless of the online-state of the primary
+ required: no
+ default: no
+"""
+
+EXAMPLES = """
+ - name: Update the role of a storage array
+ netapp_e_amg_role:
+ name: updating amg role
+ role: primary
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
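+
+ # A hedged variant: force the role change even if the current primary is
+ # offline, skipping the pre-reversal sync. The values here are illustrative.
+ - name: Force a role reversal without a preceding sync
+ netapp_e_amg_role:
+ name: updating amg role
+ role: secondary
+ noSync: yes
+ force: yes
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"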
+"""
+
+RETURN = """
+msg:
+ description: Failure message
+ returned: failure
+ type: string
+ sample: "No Async Mirror Group with the name."
+"""
+import json
+from time import sleep
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
+ amg_exists = False
+ has_desired_role = False
+ amg_id = None
+ amg_data = None
+ get_amgs = 'storage-systems/%s/async-mirrors' % ssid
+ url = api_url + get_amgs
+ try:
+ amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd,
+ headers=HEADERS)
+ except Exception:
+ module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid))
+
+ for amg in amgs:
+ if amg['label'] == name:
+ amg_exists = True
+ amg_id = amg['id']
+ amg_data = amg
+ if amg['localRole'] == body.get('role'):
+ has_desired_role = True
+
+ return amg_exists, has_desired_role, amg_id, amg_data
+
+
+def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
+ endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
+ url = api_url + endpoint
+ post_data = json.dumps(body)
+ try:
+ request(url, data=post_data, method='POST', url_username=api_usr,
+ url_password=api_pwd, headers=HEADERS)
+ except Exception:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))
+
+ status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
+ status_url = api_url + status_endpoint
+ try:
+ rc, status = request(status_url, method='GET', url_username=api_usr,
+ url_password=api_pwd, headers=HEADERS)
+ except Exception:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed to check status of AMG after role reversal. " +
+ "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))
+
+ # Here we wait for the role reversal to complete
+ if 'roleChangeProgress' in status:
+ while status['roleChangeProgress'] != "none":
+ try:
+ rc, status = request(status_url, method='GET',
+ url_username=api_usr, url_password=api_pwd, headers=HEADERS)
+ except:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed to check status of AMG after role reversal. " +
+ "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))
+ return status
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True, type='str'),
+ role=dict(required=True, choices=['primary', 'secondary']),
+ noSync=dict(required=False, type='bool', default=False),
+ force=dict(required=False, type='bool', default=False),
+ ssid=dict(required=True, type='str'),
+ api_url=dict(required=True),
+ api_username=dict(required=False),
+ api_password=dict(required=False, no_log=True),
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ p = module.params
+
+ ssid = p.pop('ssid')
+ api_url = p.pop('api_url')
+ user = p.pop('api_username')
+ pwd = p.pop('api_password')
+ name = p.pop('name')
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ amg_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name)
+
+ if not amg_exists:
+ module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
+ elif has_desired_role:
+ module.exit_json(changed=False, **amg_data)
+
+ else:
+ amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id)
+ if amg_data:
+ module.exit_json(changed=True, **amg_data)
+ else:
+ module.exit_json(changed=True, msg="AMG role changed.")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_amg_sync.py b/storage/netapp/netapp_e_amg_sync.py
new file mode 100644
index 00000000000..548b115ff0a
--- /dev/null
+++ b/storage/netapp/netapp_e_amg_sync.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_amg_sync
+short_description: Conduct synchronization actions on asynchronous mirror groups.
+description:
+ - Allows for the initialization, suspension and resumption of an asynchronous mirror group's synchronization for NetApp E-series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ description:
+ - The ID of the storage array containing the AMG you wish to target
+ name:
+ description:
+ - The name of the async mirror group you wish to target
+ required: yes
+ state:
+ description:
+ - The synchronization action you'd like to take.
+ - If C(running) then it will begin syncing if there is no active sync or will resume a suspended sync. If there is already a sync in progress, it will return with an OK status.
+ - If C(suspended) it will suspend any ongoing sync action, but return OK if there is no active sync or if the sync is already suspended
+ choices:
+ - running
+ - suspended
+ required: yes
+ delete_recovery_point:
+ description:
+ - Indicates whether the failures point can be deleted on the secondary if necessary to achieve the synchronization.
+ - If true, and if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary for any member volume, the last failures point will be deleted and synchronization will continue.
+ - If false, the synchronization will be suspended if the amount of unsynchronized data exceeds the CoW Repository capacity on the secondary and the failures point will be preserved.
+ - "NOTE: This only has impact for newly launched syncs."
+ choices:
+ - yes
+ - no
+ default: no
+"""
+EXAMPLES = """
+ - name: start AMG async
+ netapp_e_amg_sync:
+ name: "{{ amg_sync_name }}"
+ state: running
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
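+
+ # A hedged counterpart: suspend an in-progress sync. Per the option docs
+ # above, this returns OK without changes if there is no active sync or the
+ # sync is already suspended.
+ - name: suspend AMG async
+ netapp_e_amg_sync:
+ name: "{{ amg_sync_name }}"
+ state: suspended
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"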
+"""
+RETURN = """
+json:
+ description: The object attributes of the AMG.
+ returned: success
+ type: string
+ sample:
+ {
+ "changed": false,
+ "connectionType": "fc",
+ "groupRef": "3700000060080E5000299C24000006EF57ACAC70",
+ "groupState": "optimal",
+ "id": "3700000060080E5000299C24000006EF57ACAC70",
+ "label": "made_with_ansible",
+ "localRole": "primary",
+ "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC",
+ "orphanGroup": false,
+ "recoveryPointAgeAlertThresholdMinutes": 20,
+ "remoteRole": "secondary",
+ "remoteTarget": {
+ "nodeName": {
+ "ioInterfaceType": "fc",
+ "iscsiNodeName": null,
+ "remoteNodeWWN": "20040080E5299F1C"
+ },
+ "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC",
+ "scsiinitiatorTargetBaseProperties": {
+ "ioInterfaceType": "fc",
+ "iscsiinitiatorTargetBaseParameters": null
+ }
+ },
+ "remoteTargetId": "ansible2",
+ "remoteTargetName": "Ansible2",
+ "remoteTargetWwn": "60080E5000299F880000000056A25D56",
+ "repositoryUtilizationWarnThreshold": 80,
+ "roleChangeProgress": "none",
+ "syncActivity": "idle",
+ "syncCompletionTimeAlertThresholdMinutes": 10,
+ "syncIntervalMinutes": 10,
+ "worldWideName": "60080E5000299C24000006EF57ACAC70"
+ }
+"""
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+class AMGsync(object):
+ def __init__(self):
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ name=dict(required=True, type='str'),
+ ssid=dict(required=True, type='str'),
+ state=dict(required=True, type='str', choices=['running', 'suspended']),
+ delete_recovery_point=dict(required=False, type='bool', default=False)
+ ))
+ self.module = AnsibleModule(argument_spec=argument_spec)
+ args = self.module.params
+ self.name = args['name']
+ self.ssid = args['ssid']
+ self.state = args['state']
+ self.delete_recovery_point = args['delete_recovery_point']
+ try:
+ self.user = args['api_username']
+ self.pwd = args['api_password']
+ self.url = args['api_url']
+ except KeyError:
+ self.module.fail_json(msg="You must pass in api_username"
+ "and api_password and api_url to the module.")
+ self.certs = args['validate_certs']
+
+ self.post_headers = {
+ "Accept": "application/json",
+ "Content-Type": "application/json"
+ }
+ self.amg_id, self.amg_obj = self.get_amg()
+
+ def get_amg(self):
+ endpoint = self.url + '/storage-systems/%s/async-mirrors' % self.ssid
+ (rc, amg_objs) = request(endpoint, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
+ headers=self.post_headers)
+ # filter() returns an iterator on Python 3, so build a list before indexing
+ matches = [amg for amg in amg_objs if amg['label'] == self.name]
+ try:
+ amg_obj = matches[0]
+ except IndexError:
+ self.module.fail_json(
+ msg="There is no async mirror group %s associated with storage array %s" % (self.name, self.ssid))
+ return amg_obj['id'], amg_obj
+
+ @property
+ def current_state(self):
+ amg_id, amg_obj = self.get_amg()
+ return amg_obj['syncActivity']
+
+ def run_sync_action(self):
+ # If we get to this point we know that the states differ, and there is no 'err' state,
+ # so no need to revalidate
+
+ post_body = dict()
+ if self.state == 'running':
+ if self.current_state == 'idle':
+ if self.delete_recovery_point:
+ post_body.update(dict(deleteRecoveryPointIfNecessary=self.delete_recovery_point))
+ suffix = 'sync'
+ else:
+ # In a suspended state
+ suffix = 'resume'
+ else:
+ suffix = 'suspend'
+
+ endpoint = self.url + "/storage-systems/%s/async-mirrors/%s/%s" % (self.ssid, self.amg_id, suffix)
+
+ (rc, resp) = request(endpoint, method='POST', url_username=self.user, url_password=self.pwd,
+ validate_certs=self.certs, data=json.dumps(post_body), headers=self.post_headers,
+ ignore_errors=True)
+
+ if not str(rc).startswith('2'):
+ self.module.fail_json(msg=str(resp['errorMessage']))
+
+ return resp
+
+ def apply(self):
+ state_map = dict(
+ running=['active'],
+ suspended=['userSuspended', 'internallySuspended', 'paused'],
+ err=['unknown', '_UNDEFINED'])
+
+ changed = False
+ if self.current_state not in state_map[self.state]:
+ if self.current_state in state_map['err']:
+ self.module.fail_json(
+ msg="The sync is in a state of '%s', this requires manual intervention. "
+ "Please investigate and try again" % self.current_state)
+ else:
+ self.amg_obj = self.run_sync_action()
+ changed = True
+
+ (amg_id, amg) = self.get_amg()
+ self.module.exit_json(changed=changed, **amg)
+
+
+def main():
+ sync = AMGsync()
+ sync.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_auth.py b/storage/netapp/netapp_e_auth.py
new file mode 100644
index 00000000000..19bdb0bfea5
--- /dev/null
+++ b/storage/netapp/netapp_e_auth.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_auth
+short_description: Sets or updates the password for a storage array.
+description:
+ - Sets or updates the password for a storage array. When the password is updated on the storage array, it must also be updated on the SANtricity Web Services proxy. Note that not all storage arrays have a Monitor or RO role.
+version_added: "2.2"
+author: Kevin Hulquest (@hulquest)
+options:
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ name:
+ description:
+ - The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use the ID instead.
+ required: False
+ ssid:
+ description:
+ - the identifier of the storage array in the Web Services Proxy.
+ required: False
+ set_admin:
+ description:
+ - Boolean value on whether to update the admin password. If set to false then the RO account is updated.
+ default: False
+ current_password:
+ description:
+ - The current admin password. This is not required if the password hasn't been set before.
+ required: False
+ new_password:
+ description:
+ - The password you would like to set. Cannot be more than 30 characters.
+ required: True
+ api_url:
+ description:
+ - The full API url.
+ - "Example: http://ENDPOINT:8080/devmgr/v2"
+ - This can optionally be set via an environment variable, API_URL
+ required: False
+ api_username:
+ description:
+ - The username used to authenticate against the API
+ - This can optionally be set via an environment variable, API_USERNAME
+ required: False
+ api_password:
+ description:
+ - The password used to authenticate against the API
+ - This can optionally be set via an environment variable, API_PASSWORD
+ required: False
+'''
+
+EXAMPLES = '''
+- name: Test module
+ netapp_e_auth:
+ name: trex
+ current_password: OldPasswd
+ new_password: NewPasswd
+ set_admin: yes
+ api_url: '{{ netapp_api_url }}'
+ api_username: '{{ netapp_api_username }}'
+ api_password: '{{ netapp_api_password }}'
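+
+# A hedged variant that targets the array by ssid instead of name (the two
+# options are mutually exclusive) and updates the RO account by leaving
+# set_admin off. The ssid value is illustrative.
+- name: Update the read-only password by array id
+ netapp_e_auth:
+ ssid: "1"
+ current_password: OldPasswd
+ new_password: NewPasswd
+ set_admin: no
+ api_url: '{{ netapp_api_url }}'
+ api_username: '{{ netapp_api_username }}'
+ api_password: '{{ netapp_api_password }}'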
+'''
+
+RETURN = '''
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: "Password Updated Successfully"
+'''
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json"
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def get_ssid(module, name, api_url, user, pwd):
+ count = 0
+ all_systems = 'storage-systems'
+ systems_url = api_url + all_systems
+ rc, data = request(systems_url, headers=HEADERS, url_username=user, url_password=pwd)
+ for system in data:
+ if system['name'] == name:
+ count += 1
+ if count > 1:
+ module.fail_json(
+ msg="You supplied a name for the Storage Array but more than 1 array was found with that name. " +
+ "Use the id instead")
+ else:
+ ssid = system['id']
+ else:
+ continue
+
+ if count == 0:
+ module.fail_json(msg="No storage array with the name %s was found" % name)
+
+ else:
+ return ssid
+
+
+def get_pwd_status(module, ssid, api_url, user, pwd):
+ pwd_status = "storage-systems/%s/passwords" % ssid
+ url = api_url + pwd_status
+ try:
+ rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
+ return data['readOnlyPasswordSet'], data['adminPasswordSet']
+ except HTTPError:
+ error = get_exception()
+ module.fail_json(msg="There was an issue with connecting, please check that your "
+ "endpoint is properly defined and your credentials are correct: %s" % str(error))
+
+
+def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd):
+ update_pwd = 'storage-systems/%s' % ssid
+ url = api_url + update_pwd
+ post_body = json.dumps(dict(storedPassword=pwd))
+ try:
+ rc, data = request(url, data=post_body, method='POST', headers=HEADERS, url_username=api_usr,
+ url_password=api_pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, str(err)))
+ return data
+
+
+def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False):
+ set_pass = "storage-systems/%s/passwords" % ssid
+ url = api_url + set_pass
+
+ if not current_password:
+ current_password = ""
+
+ post_body = json.dumps(
+ dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password))
+
+ try:
+ rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
+ ignore_errors=True)
+ except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, str(err)))
+
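+ # A hedged reading of the retry below: a 422 from the proxy here indicates
+ # the supplied current admin password was rejected, so try once more with a
+ # blank current password in case no password had been set yet.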
+ if rc == 422:
+ post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password))
+ try:
+ rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again")
+
+ update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd)
+
+ if int(rc) == 204:
+ return update_data
+ else:
+ module.fail_json(msg="%s:%s" % (rc, data))
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=False, type='str'),
+ ssid=dict(required=False, type='str'),
+ current_password=dict(required=False, no_log=True),
+ new_password=dict(required=True, no_log=True),
+ set_admin=dict(required=True, type='bool'),
+ api_url=dict(required=True),
+ api_username=dict(required=False),
+ api_password=dict(required=False, no_log=True)
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']],
+ required_one_of=[['name', 'ssid']])
+
+ name = module.params['name']
+ ssid = module.params['ssid']
+ current_password = module.params['current_password']
+ new_password = module.params['new_password']
+ set_admin = module.params['set_admin']
+ user = module.params['api_username']
+ pwd = module.params['api_password']
+ api_url = module.params['api_url']
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ if name:
+ ssid = get_ssid(module, name, api_url, user, pwd)
+
+ ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd)
+
+ if admin_pwd and not current_password:
+ module.fail_json(
+ msg="Admin account has a password set. " +
+ "You must supply current_password in order to update the RO or Admin passwords")
+
+ if len(new_password) > 30:
+ module.fail_json(msg="Passwords must not be greater than 30 characters in length")
+
+ success = set_password(module, ssid, api_url, user, pwd, current_password=current_password,
+ new_password=new_password,
+ set_admin=set_admin)
+
+ module.exit_json(changed=True, msg="Password Updated Successfully", **success)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_facts.py b/storage/netapp/netapp_e_facts.py
new file mode 100644
index 00000000000..5a877afab61
--- /dev/null
+++ b/storage/netapp/netapp_e_facts.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: netapp_e_facts
+version_added: '2.2'
+short_description: Get facts about NetApp E-Series arrays
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ required: true
+ description:
+ - The ID of the array to manage. This value must be unique for each array.
+
+description:
+ - Return various information about NetApp E-Series storage arrays (e.g. configuration, disks)
+
+author: Kevin Hulquest (@hulquest)
+'''
+
+EXAMPLES = """
+---
+ - name: Get array facts
+ netapp_e_facts:
+ array_id: "{{ netapp_array_id }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
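+
+ # The module registers its results under ansible_facts, so a hedged
+ # follow-up task (fact names taken from the code below) might look like:
+ - name: Show drives that are still available
+ debug:
+ msg: "{{ netapp_disks | selectattr('available') | list }}"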
+"""
+
+RETURN = """
+msg: Gathered facts for the storage array.
+"""
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ ssid=dict(required=True))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ p = module.params
+
+ ssid = p['ssid']
+ validate_certs = p['validate_certs']
+
+ api_usr = p['api_username']
+ api_pwd = p['api_password']
+ api_url = p['api_url']
+
+ facts = dict(ssid=ssid)
+
+ # fetch the storage-system graph, which bundles the objects summarized below
+ try:
+ (rc, resp) = request(api_url + "/storage-systems/%s/graph" % ssid,
+ headers=dict(Accept="application/json"),
+ url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs)
+ except:
+ error = get_exception()
+ module.fail_json(
+ msg="Failed to obtain facts from storage array with id [%s]. Error [%s]" % (ssid, str(error)))
+
+ facts['snapshot_images'] = [
+ dict(
+ id=d['id'],
+ status=d['status'],
+ pit_capacity=d['pitCapacity'],
+ creation_method=d['creationMethod'],
+ repository_cap_utilization=d['repositoryCapacityUtilization'],
+ active_cow=d['activeCOW'],
+ rollback_source=d['isRollbackSource']
+ ) for d in resp['highLevelVolBundle']['pit']]
+
+ facts['netapp_disks'] = [
+ dict(
+ id=d['id'],
+ available=d['available'],
+ media_type=d['driveMediaType'],
+ status=d['status'],
+ usable_bytes=d['usableCapacity'],
+ tray_ref=d['physicalLocation']['trayRef'],
+ product_id=d['productID'],
+ firmware_version=d['firmwareVersion'],
+ serial_number=d['serialNumber'].lstrip()
+ ) for d in resp['drive']]
+
+ facts['netapp_storage_pools'] = [
+ dict(
+ id=sp['id'],
+ name=sp['name'],
+ available_capacity=sp['freeSpace'],
+ total_capacity=sp['totalRaidedSpace'],
+ used_capacity=sp['usedSpace']
+ ) for sp in resp['volumeGroup']]
+
+ all_volumes = list(resp['volume'])
+ # all_volumes.extend(resp['thinVolume'])
+
+ # TODO: exclude thin-volume repo volumes (how to ID?)
+ facts['netapp_volumes'] = [
+ dict(
+ id=v['id'],
+ name=v['name'],
+ parent_storage_pool_id=v['volumeGroupRef'],
+ capacity=v['capacity'],
+ is_thin_provisioned=v['thinProvisioned']
+ ) for v in all_volumes]
+
+ features = [f for f in resp['sa']['capabilities']]
+ features.extend([f['capability'] for f in resp['sa']['premiumFeatures'] if f['isEnabled']])
+ features = list(set(features)) # ensure unique
+ features.sort()
+ facts['netapp_enabled_features'] = features
+
+ # TODO: include other details about the storage pool (size, type, id, etc)
+ result = dict(ansible_facts=facts, changed=False)
+ module.exit_json(msg="Gathered facts for %s." % ssid, **result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/storage/netapp/netapp_e_flashcache.py b/storage/netapp/netapp_e_flashcache.py
new file mode 100644
index 00000000000..da7d520542b
--- /dev/null
+++ b/storage/netapp/netapp_e_flashcache.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: netapp_e_flashcache
+author: Kevin Hulquest (@hulquest)
+version_added: '2.2'
+short_description: Manage NetApp SSD caches
+description:
+- Create or remove SSD caches on a NetApp E-Series storage array.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ required: true
+ description:
+ - The ID of the array to manage (as configured on the web services proxy).
+ state:
+ required: true
+ description:
+ - Whether the specified SSD cache should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ name:
+ required: true
+ description:
+ - The name of the SSD cache to manage
+ io_type:
+ description:
+ - The type of workload to optimize the cache for.
+ choices: ['filesystem','database','media']
+ default: filesystem
+ disk_count:
+ description:
+ - The minimum number of disks to use for building the cache. The cache will be expanded if this number exceeds the number of disks already in place.
+ size_unit:
+ description:
+ - The unit to be applied to size arguments
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: gb
+ cache_size_min:
+ description:
+ - The minimum size (in size_units) of the ssd cache. The cache will be expanded if this exceeds the current size of the cache.
+'''
+
+EXAMPLES = """
+ - name: Flash Cache
+ netapp_e_flashcache:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ name: SSDCacheBuiltByAnsible
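+
+ # A hedged sizing example: ask for at least 2 SSDs and grow the cache until
+ # it reaches 500 GB. The counts and sizes are illustrative.
+ - name: Flash Cache with a minimum size
+ netapp_e_flashcache:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ name: SSDCacheBuiltByAnsible
+ disk_count: 2
+ size_unit: gb
+ cache_size_min: 500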
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: json for newly created flash cache
+"""
+import json
+import logging
+import sys
+from functools import reduce  # reduce is not a builtin on Python 3
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+class NetAppESeriesFlashCache(object):
+ def __init__(self):
+ self.name = None
+ self.log_mode = None
+ self.log_path = None
+ self.api_url = None
+ self.api_username = None
+ self.api_password = None
+ self.ssid = None
+ self.validate_certs = None
+ self.disk_count = None
+ self.size_unit = None
+ self.cache_size_min = None
+ self.io_type = None
+ self.driveRefs = None
+ self.state = None
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ ssid=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ disk_count=dict(type='int'),
+ disk_refs=dict(type='list'),
+ cache_size_min=dict(type='int'),
+ io_type=dict(default='filesystem', choices=['filesystem', 'database', 'media']),
+ size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
+ type='str'),
+ criteria_disk_phy_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
+ type='str'),
+ log_mode=dict(type='str'),
+ log_path=dict(type='str'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[],
+ mutually_exclusive=[],
+ # TODO: update validation for various selection criteria
+ supports_check_mode=True
+ )
+
+ self.__dict__.update(self.module.params)
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+ self.debug = self._logger.debug
+
+ if self.log_mode == 'file' and self.log_path:
+ logging.basicConfig(level=logging.DEBUG, filename=self.log_path)
+ elif self.log_mode == 'stderr':
+ logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
+
+ self.post_headers = dict(Accept="application/json")
+ self.post_headers['Content-Type'] = 'application/json'
+
+ def get_candidate_disks(self, disk_count, size_unit='gb', capacity=None):
+ self.debug("getting candidate disks...")
+
+ drives_req = dict(
+ driveCount=disk_count,
+ sizeUnit=size_unit,
+ driveType='ssd',
+ )
+
+ if capacity:
+ drives_req['targetUsableCapacity'] = capacity
+
+ (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid),
+ data=json.dumps(drives_req), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ if rc == 204:
+ self.module.fail_json(msg='Cannot find disks to match requested criteria for ssd cache')
+
+ disk_ids = [d['id'] for d in drives_resp]
+ total_bytes = reduce(lambda s, d: s + int(d['usableCapacity']), drives_resp, 0)
+
+ return disk_ids, total_bytes
+
+ def create_cache(self):
+ (disk_ids, total_bytes) = self.get_candidate_disks(disk_count=self.disk_count, size_unit=self.size_unit,
+ capacity=self.cache_size_min)
+
+ self.debug("creating ssd cache...")
+
+ create_fc_req = dict(
+ driveRefs=disk_ids,
+ name=self.name
+ )
+
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
+ data=json.dumps(create_fc_req), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ def update_cache(self):
+ self.debug('updating flash cache config...')
+ update_fc_req = dict(
+ name=self.name,
+ configType=self.io_type
+ )
+
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/configure" % (self.ssid),
+ data=json.dumps(update_fc_req), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ def delete_cache(self):
+ self.debug('deleting flash cache...')
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), method='DELETE',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs, ignore_errors=True)
+
+ @property
+ def needs_more_disks(self):
+ if len(self.cache_detail['driveRefs']) < self.disk_count:
+ self.debug("needs resize: current disk count %s < requested requested count %s" % (
+ len(self.cache_detail['driveRefs']), self.disk_count))
+ return True
+
+ @property
+ def needs_less_disks(self):
+ if len(self.cache_detail['driveRefs']) > self.disk_count:
+ self.debug("needs resize: current disk count %s < requested requested count %s" % (
+ len(self.cache_detail['driveRefs']), self.disk_count))
+ return True
+
+ @property
+ def current_size_bytes(self):
+ return int(self.cache_detail['fcDriveInfo']['fcWithDrives']['usedCapacity'])
+
+ @property
+ def requested_size_bytes(self):
+ if self.cache_size_min:
+ return self.cache_size_min * self._size_unit_map[self.size_unit]
+ else:
+ return 0
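+ # e.g. cache_size_min=500 with size_unit='gb' resolves to 500 * 1024**3 bytes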
+
+ @property
+ def needs_more_capacity(self):
+ if self.current_size_bytes < self.requested_size_bytes:
+ self.debug("needs resize: current capacity %sb is less than requested minimum %sb" % (
+ self.current_size_bytes, self.requested_size_bytes))
+ return True
+
+ @property
+ def needs_resize(self):
+ return self.needs_more_disks or self.needs_more_capacity or self.needs_less_disks
+
+ def resize_cache(self):
+ # increase up to disk count first, then iteratively add disks until we meet requested capacity
+
+ # TODO: perform this calculation in check mode
+ current_disk_count = len(self.cache_detail['driveRefs'])
+ proposed_new_disks = 0
+
+ proposed_additional_bytes = 0
+ proposed_disk_ids = []
+
+ if self.needs_more_disks:
+ proposed_disk_count = self.disk_count - current_disk_count
+
+ (disk_ids, total_bytes) = self.get_candidate_disks(disk_count=proposed_disk_count)
+ proposed_additional_bytes = total_bytes
+ proposed_disk_ids = disk_ids
+
+ while self.current_size_bytes + proposed_additional_bytes < self.requested_size_bytes:
+ proposed_new_disks += 1
+ (disk_ids, total_bytes) = self.get_candidate_disks(disk_count=proposed_new_disks)
+ proposed_disk_ids = disk_ids
+ proposed_additional_bytes = total_bytes
+
+ add_drives_req = dict(
+ driveRef=proposed_disk_ids
+ )
+
+ self.debug("adding drives to flash-cache...")
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/addDrives" % (self.ssid),
+ data=json.dumps(add_drives_req), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ elif self.needs_less_disks and self.driveRefs:
+ rm_drives = dict(driveRef=self.driveRefs)
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/removeDrives" % (self.ssid),
+ data=json.dumps(rm_drives), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ def apply(self):
+ result = dict(changed=False)
+ (rc, cache_resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs, ignore_errors=True)
+
+ if rc == 200:
+ self.cache_detail = cache_resp
+ else:
+ self.cache_detail = None
+
+ if rc not in [200, 404]:
+ raise Exception(
+ "Unexpected error code %s fetching flash cache detail. Response data was %s" % (rc, cache_resp))
+
+ if self.state == 'present':
+ if self.cache_detail:
+ # TODO: verify parameters against detail for changes
+ if self.cache_detail['name'] != self.name:
+ self.debug("CHANGED: name differs")
+ result['changed'] = True
+ if self.cache_detail['flashCacheBase']['configType'] != self.io_type:
+ self.debug("CHANGED: io_type differs")
+ result['changed'] = True
+ if self.needs_resize:
+ self.debug("CHANGED: resize required")
+ result['changed'] = True
+ else:
+ self.debug("CHANGED: requested state is 'present' but cache does not exist")
+ result['changed'] = True
+ else: # requested state is absent
+ if self.cache_detail:
+ self.debug("CHANGED: requested state is 'absent' but cache exists")
+ result['changed'] = True
+
+ if not result['changed']:
+ self.debug("no changes, exiting...")
+ self.module.exit_json(**result)
+
+ if self.module.check_mode:
+ self.debug("changes pending in check mode, exiting early...")
+ self.module.exit_json(**result)
+
+ if self.state == 'present':
+ if not self.cache_detail:
+ self.create_cache()
+ else:
+ if self.needs_resize:
+ self.resize_cache()
+
+ # run update here as well, since io_type can't be set on creation
+ self.update_cache()
+
+ elif self.state == 'absent':
+ self.delete_cache()
+
+ # TODO: include other details about the storage pool (size, type, id, etc)
+ self.module.exit_json(changed=result['changed'], **self.resp)
+
+
+def main():
+ sp = NetAppESeriesFlashCache()
+ try:
+ sp.apply()
+ except Exception:
+ e = get_exception()
+ sp.debug("Exception in apply(): \n%s" % str(e))
+ sp.module.fail_json(msg="Failed to create flash cache. Error[%s]" % str(e))
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_host.py b/storage/netapp/netapp_e_host.py
new file mode 100644
index 00000000000..458bb6fb8b6
--- /dev/null
+++ b/storage/netapp/netapp_e_host.py
@@ -0,0 +1,429 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_host
+short_description: Manage E-Series hosts
+description:
+ - Create, update, and remove hosts on NetApp E-Series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ description:
+ - the id of the storage array you wish to act against
+ required: True
+ name:
+ description:
+ - If the host doesn't yet exist, the label to assign at creation time.
+ - If the host already exists, this is the label used to identify the host when applying any desired changes.
+ required: True
+ host_type_index:
+ description:
+ - The index that maps to the host type you wish to create. It is recommended to use the M(netapp_e_facts) module to gather this information. Alternatively you can use the WSP portal to retrieve the information.
+ required: True
+ ports:
+ description:
+ - A list of dictionaries of host ports you wish to associate with the newly created host.
+ required: False
+ group:
+ description:
+ - the group you want the host to be a member of
+ required: False
+
+"""
+
+EXAMPLES = """
+ - name: Set Host Info
+ netapp_e_host:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ name: "{{ host_name }}"
+ host_type_index: "{{ host_type_index }}"
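+
+ # A hedged sketch of associating ports at the same time. The port dict keys
+ # (type, label, iscsiChapSecret) mirror the fields this module inspects
+ # elsewhere in the code; the values themselves are illustrative.
+ - name: Set Host Info with ports
+ netapp_e_host:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ name: "{{ host_name }}"
+ host_type_index: "{{ host_type_index }}"
+ ports:
+ - type: iscsi
+ label: iscsi_port_1
+ iscsiChapSecret: "{{ chap_secret }}"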
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: The host has been created.
+"""
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+class Host(object):
+ def __init__(self):
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ ssid=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ group=dict(type='str', required=False),
+ ports=dict(type='list', required=False),
+ force_port=dict(type='bool', default=False),
+ name=dict(type='str', required=True),
+ host_type_index=dict(type='int', required=True)
+ ))
+
+ self.module = AnsibleModule(argument_spec=argument_spec)
+ args = self.module.params
+ self.group = args['group']
+ self.ports = args['ports']
+ self.force_port = args['force_port']
+ self.name = args['name']
+ self.host_type_index = args['host_type_index']
+ self.state = args['state']
+ self.ssid = args['ssid']
+ self.url = args['api_url']
+ self.user = args['api_username']
+ self.pwd = args['api_password']
+ self.certs = args['validate_certs']
+ self.ports = args['ports']
+ self.post_body = dict()
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ @property
+ def valid_host_type(self):
+ try:
+ (rc, host_types) = request(self.url + 'storage-systems/%s/host-types' % self.ssid, url_password=self.pwd,
+ url_username=self.user, validate_certs=self.certs, headers=HEADERS)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ # filter() returns an iterator on Python 3, so use any() instead of indexing
+ if any(host_type['index'] == self.host_type_index for host_type in host_types):
+ return True
+ self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)
+
+ @property
+ def hostports_available(self):
+ used_ids = list()
+ try:
+ (rc, self.available_ports) = request(self.url + 'storage-systems/%s/unassociated-host-ports' % self.ssid,
+ url_password=self.pwd, url_username=self.user,
+ validate_certs=self.certs,
+ headers=HEADERS)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to get unassociated host ports. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ if len(self.available_ports) > 0 and len(self.ports) <= len(self.available_ports):
+ for port in self.ports:
+ for free_port in self.available_ports:
+ # Make sure we haven't already claimed this port ID for another requested port
+ if free_port['id'] not in used_ids:
+ # update the port arg to have an id attribute
+ used_ids.append(free_port['id'])
+ break
+
+ if len(used_ids) != len(self.ports) and not self.force_port:
+ self.module.fail_json(
+ msg="There are not enough free host ports with the specified port types to proceed")
+ else:
+ return True
+
+ else:
+ self.module.fail_json(msg="There are no host ports available OR there are not enough unassigned host ports")
+
+ @property
+ def group_id(self):
+ if self.group:
+ try:
+ (rc, all_groups) = request(self.url + 'storage-systems/%s/host-groups' % self.ssid,
+ url_password=self.pwd,
+ url_username=self.user, validate_certs=self.certs, headers=HEADERS)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to get host groups. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ try:
+ # filter() returns an iterator on Python 3, so build a list before indexing
+ group_obj = [group for group in all_groups if group['name'] == self.group][0]
+ return group_obj['id']
+ except IndexError:
+ self.module.fail_json(msg="No group with the name: %s exists" % self.group)
+ else:
+ # Return the value equivalent of no group
+ return "0000000000000000000000000000000000000000"
+
+ @property
+ def host_exists(self):
+ try:
+ (rc, all_hosts) = request(self.url + 'storage-systems/%s/hosts' % self.ssid, url_password=self.pwd,
+ url_username=self.user, validate_certs=self.certs, headers=HEADERS)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ self.all_hosts = all_hosts
+ try: # Try to grab the host object
+ self.host_obj = list(filter(lambda host: host['label'] == self.name, all_hosts))[0]
+ return True
+ except IndexError:
+ # Host with the name passed in does not exist
+ return False
+
+ @property
+ def needs_update(self):
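+ # Compares the desired group, host type and port layout against the
+ # existing host object fetched by host_exists.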
+ needs_update = False
+ self.force_port_update = False
+
+ if self.host_obj['clusterRef'] != self.group_id or \
+ self.host_obj['hostTypeIndex'] != self.host_type_index:
+ needs_update = True
+
+ if self.ports:
+ if not self.host_obj['ports']:
+ needs_update = True
+ for arg_port in self.ports:
+ # First a quick check to see if the port is mapped to a different host
+ if not self.port_on_diff_host(arg_port):
+ for obj_port in self.host_obj['ports']:
+ if arg_port['label'] == obj_port['label']:
+ # Confirmed that port arg passed in exists on the host
+ # port_id = self.get_port_id(obj_port['label'])
+ if arg_port['type'] != obj_port['portId']['ioInterfaceType']:
+ needs_update = True
+ if 'iscsiChapSecret' in arg_port:
+ # No way to know the current secret attr, so always return True just in case
+ needs_update = True
+ else:
+ # If the user wants the ports to be reassigned, do it
+ if self.force_port:
+ self.force_port_update = True
+ needs_update = True
+ else:
+ self.module.fail_json(
+ msg="The port you specified:\n%s\n is associated with a different host. Specify force_port as True or try a different port spec" % arg_port)
+
+ return needs_update
+
+ def port_on_diff_host(self, arg_port):
+ """ Checks to see if a passed in port arg is present on a different host """
+ for host in self.all_hosts:
+ # Only check 'other' hosts
+ if host['label'] != self.name:
+ for port in host['ports']:
+ # Check if the port label is found in the port dict list of each host
+ if arg_port['label'] == port['label']:
+ self.other_host = host
+ return True
+ return False
+
+ def reassign_ports(self, apply=True):
+ if not self.post_body:
+ self.post_body = dict(
+ portsToUpdate=dict()
+ )
+
+ for port in self.ports:
+ if self.port_on_diff_host(port):
+ self.post_body['portsToUpdate'].update(dict(
+ portRef=self.other_host['hostPortRef'],
+ hostRef=self.host_obj['id'],
+ # Doesn't yet address port identifier or chap secret
+ ))
+
+ if apply:
+ try:
+ (rc, self.host_obj) = request(
+ self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']),
+ url_username=self.user, url_password=self.pwd, headers=HEADERS,
+ validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
+ except:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to reassign host port. Host Id [%s]. Array Id [%s]. Error [%s]." % (
+ self.host_obj['id'], self.ssid, str(err)))
+
+ def update_host(self):
+ if self.ports:
+ if self.hostports_available:
+ if self.force_port_update is True:
+ self.reassign_ports(apply=False)
+ # Make sure that only ports that aren't being reassigned are passed into the ports attr
+ self.ports = [port for port in self.ports if not self.port_on_diff_host(port)]
+
+ self.post_body['ports'] = self.ports
+
+ if self.group:
+ self.post_body['groupId'] = self.group_id
+
+ self.post_body['hostType'] = dict(index=self.host_type_index)
+
+ try:
+ (rc, self.host_obj) = request(self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']),
+ url_username=self.user, url_password=self.pwd, headers=HEADERS,
+ validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ self.module.exit_json(changed=True, **self.host_obj)
+
+ def create_host(self):
+ post_body = dict(
+ name=self.name,
+ host_type=dict(index=self.host_type_index),
+ groupId=self.group_id,
+ ports=self.ports
+ )
+ if self.ports:
+ # Check that all supplied port args are valid
+ if self.hostports_available:
+ post_body.update(ports=self.ports)
+ elif not self.force_port:
+ self.module.fail_json(
+ msg="You supplied ports that are already in use. Supply force_port to True if you wish to reassign the ports")
+
+ if not self.host_exists:
+ try:
+ (rc, create_resp) = request(self.url + "storage-systems/%s/hosts" % self.ssid, method='POST',
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
+ data=json.dumps(post_body), headers=HEADERS)
+ except:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+ else:
+ self.module.exit_json(changed=False,
+ msg="Host already exists. Id [%s]. Host [%s]." % (self.ssid, self.name))
+
+ self.host_obj = create_resp
+
+ if self.ports and self.force_port:
+ self.reassign_ports()
+
+ self.module.exit_json(changed=True, **self.host_obj)
+
+ def remove_host(self):
+ try:
+ (rc, resp) = request(self.url + "storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj['id']),
+ method='DELETE',
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
+ except:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to remote host. Host[%s]. Array Id [%s]. Error [%s]." % (self.host_obj['id'],
+ self.ssid,
+ str(err)))
+
+ def apply(self):
+ if self.state == 'present':
+ if self.host_exists:
+ if self.needs_update and self.valid_host_type:
+ self.update_host()
+ else:
+ self.module.exit_json(changed=False, msg="Host already present.", id=self.ssid, label=self.name)
+ elif self.valid_host_type:
+ self.create_host()
+ else:
+ if self.host_exists:
+ self.remove_host()
+ self.module.exit_json(changed=True, msg="Host removed.")
+ else:
+ self.module.exit_json(changed=False, msg="Host already absent.", id=self.ssid, label=self.name)
+
+
+def main():
+ host = Host()
+ host.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_hostgroup.py b/storage/netapp/netapp_e_hostgroup.py
new file mode 100644
index 00000000000..f89397af59d
--- /dev/null
+++ b/storage/netapp/netapp_e_hostgroup.py
@@ -0,0 +1,417 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_hostgroup
+version_added: "2.2"
+short_description: Manage NetApp Storage Array Host Groups
+author: Kevin Hulquest (@hulquest)
+description:
+- Create, update or destroy host groups on a NetApp E-Series storage array.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ required: true
+ description:
+ - The ID of the array to manage (as configured on the web services proxy).
+ state:
+ required: true
+ description:
+ - Whether the specified host group should exist or not.
+ choices: ['present', 'absent']
+ name:
+ required: false
+ description:
+ - The name of the host group to manage. Either this or C(id) must be supplied.
+ new_name:
+ required: false
+ description:
+ - Specify this when you need to update the name of a host group.
+ id:
+ required: false
+ description:
+ - The id number of the host group to manage. Either this or C(name) must be supplied.
+ hosts:
+ required: false
+ description:
+ - A list of host names/labels to add to the group.
+'''
+EXAMPLES = '''
+ - name: Configure Hostgroup
+ netapp_e_hostgroup:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: present
+ name: example_group # either name or id is required
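+
+ # A further sketch (hypothetical group and host names): rename an existing
+ # group and set its membership explicitly via new_name and hosts.
+ - name: Rename Hostgroup and set membership
+ netapp_e_hostgroup:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: present
+ name: example_group
+ new_name: example_group_renamed
+ hosts:
+ - HostA
+ - HostB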
+'''
+RETURN = '''
+clusterRef:
+ description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster.
+ returned: always except when state is absent
+ type: string
+ sample: "3233343536373839303132333100000000000000"
+confirmLUNMappingCreation:
+ description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping will alter the volume access rights of other clusters, in addition to this one.
+ returned: always
+ type: boolean
+ sample: false
+hosts:
+ description: A list of the hosts that are part of the host group after all operations.
+ returned: always except when state is absent
+ type: list
+ sample: ["HostA","HostB"]
+id:
+ description: The id number of the hostgroup
+ returned: always except when state is absent
+ type: string
+ sample: "3233343536373839303132333100000000000000"
+isSAControlled:
+ description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false, indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
+ returned: always except when state is absent
+ type: boolean
+ sample: false
+label:
+ description: The user-assigned, descriptive label string for the cluster.
+ returned: always
+ type: string
+ sample: "MyHostGroup"
+name:
+ description: same as label
+ returned: always except when state is absent
+ type: string
+ sample: "MyHostGroup"
+protectionInformationCapableAccessMethod:
+ description: This field is true if the host has a PI capable access method.
+ returned: always except when state is absent
+ type: boolean
+ sample: true
+'''
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json"
+}
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
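+ # Thin wrapper around open_url(): returns (HTTP status code, decoded JSON
+ # body) and raises Exception(code, data) for codes >= 400 unless
+ # ignore_errors is set.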
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def group_exists(module, id_type, ident, ssid, api_url, user, pwd):
+ rc, data = get_hostgroups(module, ssid, api_url, user, pwd)
+ for group in data:
+ if group[id_type] == ident:
+ return True, data
+
+ return False, data
+
+
+def get_hostgroups(module, ssid, api_url, user, pwd):
+ groups = "storage-systems/%s/host-groups" % ssid
+ url = api_url + groups
+ try:
+ rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
+ return rc, data
+ except HTTPError:
+ err = get_exception()
+ module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]." % (ssid, str(err)))
+
+
+def get_hostref(module, ssid, name, api_url, user, pwd):
+ all_hosts = 'storage-systems/%s/hosts' % ssid
+ url = api_url + all_hosts
+ try:
+ rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]." % (ssid, str(err)))
+
+ for host in data:
+ if host['name'] == name:
+ return host['hostRef']
+
+ module.fail_json(msg="No host with the name %s could be found" % name)
+
+
+def create_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None):
+ groups = "storage-systems/%s/host-groups" % ssid
+ url = api_url + groups
+ hostrefs = []
+
+ if hosts:
+ for host in hosts:
+ href = get_hostref(module, ssid, host, api_url, user, pwd)
+ hostrefs.append(href)
+
+ post_data = json.dumps(dict(name=name, hosts=hostrefs))
+ try:
+ rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to create host group. Id [%s]. Error [%s]." % (ssid, str(err)))
+
+ return rc, data
+
+
+def update_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None, new_name=None):
+ gid = get_hostgroup_id(module, ssid, name, api_url, user, pwd)
+ groups = "storage-systems/%s/host-groups/%s" % (ssid, gid)
+ url = api_url + groups
+ hostrefs = []
+
+ if hosts:
+ for host in hosts:
+ href = get_hostref(module, ssid, host, api_url, user, pwd)
+ hostrefs.append(href)
+
+ if new_name:
+ post_data = json.dumps(dict(name=new_name, hosts=hostrefs))
+ else:
+ post_data = json.dumps(dict(hosts=hostrefs))
+
+ try:
+ rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to update host group. Group [%s]. Id [%s]. Error [%s]." % (gid, ssid,
+ str(err)))
+
+ return rc, data
+
+
+def delete_hostgroup(module, ssid, group_id, api_url, user, pwd):
+ groups = "storage-systems/%s/host-groups/%s" % (ssid, group_id)
+ url = api_url + groups
+ # TODO: Loop through hosts, do mapping to href, make new list to pass to data
+ try:
+ rc, data = request(url, method='DELETE', headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to delete host group. Group [%s]. Id [%s]. Error [%s]." % (group_id, ssid, str(err)))
+
+ return rc, data
+
+
+def get_hostgroup_id(module, ssid, name, api_url, user, pwd):
+ all_groups = 'storage-systems/%s/host-groups' % ssid
+ url = api_url + all_groups
+ rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
+ for hg in data:
+ if hg['name'] == name:
+ return hg['id']
+
+ module.fail_json(msg="A hostgroup with the name %s could not be found" % name)
+
+
+def get_hosts_in_group(module, ssid, group_name, api_url, user, pwd):
+ all_groups = 'storage-systems/%s/host-groups' % ssid
+ g_url = api_url + all_groups
+ try:
+ g_rc, g_data = request(g_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed in first step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." % (group_name,
+ ssid,
+ str(err)))
+
+ all_hosts = 'storage-systems/%s/hosts' % ssid
+ h_url = api_url + all_hosts
+ try:
+ h_rc, h_data = request(h_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed in second step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." % (
+ group_name,
+ ssid,
+ str(err)))
+
+ hosts_in_group = []
+
+ for hg in g_data:
+ if hg['name'] == group_name:
+ clusterRef = hg['clusterRef']
+
+ for host in h_data:
+ if host['clusterRef'] == clusterRef:
+ hosts_in_group.append(host['name'])
+
+ return hosts_in_group
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False),
+ new_name=dict(required=False),
+ ssid=dict(required=True),
+ id=dict(required=False),
+ state=dict(required=True, choices=['present', 'absent']),
+ hosts=dict(required=False, type='list'),
+ api_url=dict(required=True),
+ api_username=dict(required=True),
+ validate_certs=dict(required=False, default=True),
+ api_password=dict(required=True, no_log=True)
+ ),
+ supports_check_mode=False,
+ mutually_exclusive=[['name', 'id']],
+ required_one_of=[['name', 'id']]
+ )
+
+ name = module.params['name']
+ new_name = module.params['new_name']
+ ssid = module.params['ssid']
+ id_num = module.params['id']
+ state = module.params['state']
+ hosts = module.params['hosts']
+ user = module.params['api_username']
+ pwd = module.params['api_password']
+ api_url = module.params['api_url']
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ if name:
+ id_type = 'name'
+ id_key = name
+ elif id_num:
+ id_type = 'id'
+ id_key = id_num
+
+ exists, group_data = group_exists(module, id_type, id_key, ssid, api_url, user, pwd)
+
+ if state == 'present':
+ if not exists:
+ try:
+ rc, data = create_hostgroup(module, ssid, name, api_url, user, pwd, hosts)
+ except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to create a host group. Id [%s]. Error [%s]." % (ssid, str(err)))
+
+ hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd)
+ module.exit_json(changed=True, hosts=hosts, **data)
+ else:
+ current_hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd)
+
+ if not current_hosts:
+ current_hosts = []
+
+ if not hosts:
+ hosts = []
+
+ if set(current_hosts) != set(hosts):
+ try:
+ rc, data = update_hostgroup(module, ssid, name, api_url, user, pwd, hosts, new_name)
+ except Exception:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed to update host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, str(err)))
+ module.exit_json(changed=True, hosts=hosts, **data)
+ else:
+ for group in group_data:
+ if group['name'] == name:
+ module.exit_json(changed=False, hosts=current_hosts, **group)
+
+ elif state == 'absent':
+ if exists:
+ hg_id = get_hostgroup_id(module, ssid, name, api_url, user, pwd)
+ try:
+ rc, data = delete_hostgroup(module, ssid, hg_id, api_url, user, pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed to delete host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, str(err)))
+
+ module.exit_json(changed=True, msg="Host Group deleted")
+ else:
+ module.exit_json(changed=False, msg="Host Group is already absent")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_lun_mapping.py b/storage/netapp/netapp_e_lun_mapping.py
new file mode 100644
index 00000000000..5c9d71973b4
--- /dev/null
+++ b/storage/netapp/netapp_e_lun_mapping.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_lun_mapping
+author: Kevin Hulquest (@hulquest)
+short_description: Create or Remove LUN Mappings
+description:
+ - Allows for the creation and removal of volume to host mappings for NetApp E-series storage arrays.
+version_added: "2.2"
+options:
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ description:
+ - "The storage system array identifier."
+ required: False
+ lun:
+ description:
+ - The LUN number you wish to give the mapping
+ - If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
+ required: False
+ default: 0
+ target:
+ description:
+ - The name of host or hostgroup you wish to assign to the mapping
+ - If omitted, the default hostgroup is used.
+ - If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
+ required: False
+ volume_name:
+ description:
+ - The name of the volume you wish to include in the mapping.
+ required: True
+ target_type:
+ description:
+ - Whether the target is a host or group.
+ - Required if supplying an explicit target.
+ required: False
+ choices: ["host", "group"]
+ state:
+ description:
+ - Present will ensure the mapping exists, absent will remove the mapping.
+ - All parameters I(lun), I(target), I(target_type) and I(volume_name) must still be supplied.
+ required: True
+ choices: ["present", "absent"]
+ api_url:
+ description:
+ - "The full API url. Example: http://ENDPOINT:8080/devmgr/v2"
+ - This can optionally be set via an environment variable, API_URL
+ required: False
+ api_username:
+ description:
+ - The username used to authenticate against the API. This can optionally be set via an environment variable, API_USERNAME
+ required: False
+ api_password:
+ description:
+ - The password used to authenticate against the API. This can optionally be set via an environment variable, API_PASSWORD
+ required: False
+'''
+
+EXAMPLES = '''
+---
+ - name: Lun Mapping Example
+ netapp_e_lun_mapping:
+ state: present
+ ssid: 1
+ lun: 12
+ target: Wilson
+ volume_name: Colby1
+ target_type: group
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
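+
+ # A removal sketch (same hypothetical names as above): state absent still
+ # requires the full mapping spec so the exact mapping can be matched.
+ - name: Remove Lun Mapping
+ netapp_e_lun_mapping:
+ state: absent
+ ssid: 1
+ lun: 12
+ target: Wilson
+ volume_name: Colby1
+ target_type: group
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"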
+'''
+RETURN = '''
+msg:
+ description: Status of the mapping operation, e.g. C(Mapping exists.) or C(Mapping removed.)
+ returned: always
+ type: string
+ sample: "Mapping exists."
+'''
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json"
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def get_host_and_group_map(module, ssid, api_url, user, pwd):
+ mapping = dict(host=dict(), group=dict())
+
+ hostgroups = 'storage-systems/%s/host-groups' % ssid
+ groups_url = api_url + hostgroups
+ try:
+ hg_rc, hg_data = request(groups_url, headers=HEADERS, url_username=user, url_password=pwd)
+ except:
+ err = get_exception()
+ module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]" % (ssid, str(err)))
+
+ for group in hg_data:
+ mapping['group'][group['name']] = group['id']
+
+ hosts = 'storage-systems/%s/hosts' % ssid
+ hosts_url = api_url + hosts
+ try:
+ h_rc, h_data = request(hosts_url, headers=HEADERS, url_username=user, url_password=pwd)
+ except:
+ err = get_exception()
+ module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]" % (ssid, str(err)))
+
+ for host in h_data:
+ mapping['host'][host['name']] = host['id']
+
+ return mapping
+
+
+def get_volume_id(module, data, ssid, name, api_url, user, pwd):
+ qty = 0
+ for volume in data:
+ if volume['name'] == name:
+ qty += 1
+
+ if qty > 1:
+ module.fail_json(msg="More than one volume with the name: %s was found, "
+ "please use the volume WWN instead" % name)
+ else:
+ wwn = volume['wwn']
+
+ try:
+ return wwn
+ except NameError:
+ module.fail_json(msg="No volume with the name: %s, was found" % (name))
+
+
+def get_hostgroups(module, ssid, api_url, user, pwd):
+ groups = "storage-systems/%s/host-groups" % ssid
+ url = api_url + groups
+ try:
+ rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
+ return data
+ except Exception:
+ module.fail_json(msg="There was an issue with connecting, please check that your"
+ "endpoint is properly defined and your credentials are correct")
+
+
+def get_volumes(module, ssid, api_url, user, pwd, mappable):
+ volumes = 'storage-systems/%s/%s' % (ssid, mappable)
+ url = api_url + volumes
+ try:
+ rc, data = request(url, url_username=user, url_password=pwd)
+ except Exception:
+ err = get_exception()
+ module.fail_json(
+ msg="Failed to mappable objects. Type[%s. Id [%s]. Error [%s]." % (mappable, ssid, str(err)))
+ return data
+
+
+def get_lun_mappings(ssid, api_url, user, pwd, get_all=None):
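+ # Returns the array's current volume mappings; unless get_all is set, strips
+ # the keys that are not part of the desired-state comparison so the result
+ # can be tested for membership directly.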
+ mappings = 'storage-systems/%s/volume-mappings' % ssid
+ url = api_url + mappings
+ rc, data = request(url, url_username=user, url_password=pwd)
+
+ if not get_all:
+ remove_keys = ('ssid', 'perms', 'lunMappingRef', 'type', 'id')
+
+ for key in remove_keys:
+ for mapping in data:
+ del mapping[key]
+
+ return data
+
+
+def create_mapping(module, ssid, lun_map, vol_name, api_url, user, pwd):
+ mappings = 'storage-systems/%s/volume-mappings' % ssid
+ url = api_url + mappings
+ post_body = json.dumps(dict(
+ mappableObjectId=lun_map['volumeRef'],
+ targetId=lun_map['mapRef'],
+ lun=lun_map['lun']
+ ))
+
+ rc, data = request(url, data=post_body, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
+ ignore_errors=True)
+
+ if rc == 422:
+ # A 422 here means the volume is already part of a different mapping;
+ # move the existing mapping to the requested target instead of failing.
+ data = move_lun(module, ssid, lun_map, vol_name, api_url, user, pwd)
+ return data
+
+
+def move_lun(module, ssid, lun_map, vol_name, api_url, user, pwd):
+ lun_id = get_lun_id(module, ssid, lun_map, api_url, user, pwd)
+ move_lun = "storage-systems/%s/volume-mappings/%s/move" % (ssid, lun_id)
+ url = api_url + move_lun
+ post_body = json.dumps(dict(targetId=lun_map['mapRef'], lun=lun_map['lun']))
+ rc, data = request(url, data=post_body, method='POST', url_username=user, url_password=pwd, headers=HEADERS)
+ return data
+
+
+def get_lun_id(module, ssid, lun_mapping, api_url, user, pwd):
+ data = get_lun_mappings(ssid, api_url, user, pwd, get_all=True)
+
+ for lun_map in data:
+ if lun_map['volumeRef'] == lun_mapping['volumeRef']:
+ return lun_map['id']
+ # This shouldn't ever get called
+ module.fail_json(msg="No LUN map found.")
+
+
+def remove_mapping(module, ssid, lun_mapping, api_url, user, pwd):
+ lun_id = get_lun_id(module, ssid, lun_mapping, api_url, user, pwd)
+ lun_del = "storage-systems/%s/volume-mappings/%s" % (ssid, lun_id)
+ url = api_url + lun_del
+ rc, data = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS)
+ return data
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ state=dict(required=True, choices=['present', 'absent']),
+ target=dict(required=False, default=None),
+ target_type=dict(required=False, choices=['host', 'group']),
+ lun=dict(required=False, type='int', default=0),
+ ssid=dict(required=False),
+ volume_name=dict(required=True),
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ state = module.params['state']
+ target = module.params['target']
+ target_type = module.params['target_type']
+ lun = module.params['lun']
+ ssid = module.params['ssid']
+ vol_name = module.params['volume_name']
+ user = module.params['api_username']
+ pwd = module.params['api_password']
+ api_url = module.params['api_url']
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ volume_map = get_volumes(module, ssid, api_url, user, pwd, "volumes")
+ thin_volume_map = get_volumes(module, ssid, api_url, user, pwd, "thin-volumes")
+ volref = None
+
+ for vol in volume_map:
+ if vol['label'] == vol_name:
+ volref = vol['volumeRef']
+
+ if not volref:
+ for vol in thin_volume_map:
+ if vol['label'] == vol_name:
+ volref = vol['volumeRef']
+
+ if not volref:
+ module.fail_json(changed=False, msg="No volume with the name %s was found" % vol_name)
+
+ host_and_group_mapping = get_host_and_group_map(module, ssid, api_url, user, pwd)
+
+ desired_lun_mapping = dict(
+ mapRef=host_and_group_mapping[target_type][target],
+ lun=lun,
+ volumeRef=volref
+ )
+
+ lun_mappings = get_lun_mappings(ssid, api_url, user, pwd)
+
+ if state == 'present':
+ if desired_lun_mapping in lun_mappings:
+ module.exit_json(changed=False, msg="Mapping exists")
+ else:
+ result = create_mapping(module, ssid, desired_lun_mapping, vol_name, api_url, user, pwd)
+ module.exit_json(changed=True, **result)
+
+ elif state == 'absent':
+ if desired_lun_mapping in lun_mappings:
+ result = remove_mapping(module, ssid, desired_lun_mapping, api_url, user, pwd)
+ module.exit_json(changed=True, msg="Mapping removed")
+ else:
+ module.exit_json(changed=False, msg="Mapping absent")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_snapshot_group.py b/storage/netapp/netapp_e_snapshot_group.py
new file mode 100644
index 00000000000..f0464bbf7c8
--- /dev/null
+++ b/storage/netapp/netapp_e_snapshot_group.py
@@ -0,0 +1,386 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_snapshot_group
+short_description: Manage snapshot groups
+description:
+ - Create, update, delete snapshot groups for NetApp E-series storage arrays
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ state:
+ description:
+ - Whether to ensure the group is present or absent.
+ required: True
+ choices:
+ - present
+ - absent
+ name:
+ description:
+ - The name to give the snapshot group
+ required: True
+ base_volume_name:
+ description:
+ - The name of the base volume or thin volume to use as the base for the new snapshot group.
+ - If a snapshot group with an identical C(name) already exists but with a different base volume
+ an error will be returned.
+ required: True
+ repo_pct:
+ description:
+ - The size of the repository in relation to the size of the base volume
+ required: False
+ default: 20
+ warning_threshold:
+ description:
+ - The repository utilization warning threshold, as a percentage of the repository volume capacity.
+ required: False
+ default: 80
+ delete_limit:
+ description:
+ - The automatic deletion indicator.
+ - If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of snapshot images limited to the number specified.
+ - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
+ required: False
+ default: 30
+ full_policy:
+ description:
+ - The behavior on when the data repository becomes full.
+ - This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
+ required: False
+ default: purgepit
+ choices:
+ - purgepit
+ - unknown
+ - failbasewrites
+ - __UNDEFINED
+ storage_pool_name:
+ required: True
+ description:
+ - The name of the storage pool on which to allocate the repository volume.
+ rollback_priority:
+ required: False
+ description:
+ - The importance of the rollback operation.
+ - This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
+ choices:
+ - highest
+ - high
+ - medium
+ - low
+ - lowest
+ - __UNDEFINED
+ default: medium
+"""
+
+EXAMPLES = """
+ - name: Configure Snapshot group
+ netapp_e_snapshot_group:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ base_volume_name: SSGroup_test
+ state: present
+ name: OOSS_Group
+ repo_pct: 20
+ warning_threshold: 85
+ delete_limit: 30
+ full_policy: purgepit
+ storage_pool_name: Disk_Pool_1
+ rollback_priority: medium
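+
+ # A removal sketch (hypothetical names): state absent deletes the group;
+ # the other required options are still needed to identify it.
+ - name: Remove Snapshot group
+ netapp_e_snapshot_group:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: absent
+ base_volume_name: SSGroup_test
+ name: OOSS_Group
+ storage_pool_name: Disk_Pool_1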
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: json facts for newly created snapshot group.
+"""
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+class SnapshotGroup(object):
+ def __init__(self):
+
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ state=dict(required=True, choices=['present', 'absent']),
+ base_volume_name=dict(required=True),
+ name=dict(required=True),
+ repo_pct=dict(default=20, type='int'),
+ warning_threshold=dict(default=80, type='int'),
+ delete_limit=dict(default=30, type='int'),
+ full_policy=dict(default='purgepit', choices=['unknown', 'failbasewrites', 'purgepit']),
+ rollback_priority=dict(default='medium', choices=['highest', 'high', 'medium', 'low', 'lowest']),
+ storage_pool_name=dict(type='str'),
+ ssid=dict(required=True),
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec)
+
+ self.post_data = dict()
+ self.warning_threshold = self.module.params['warning_threshold']
+ self.base_volume_name = self.module.params['base_volume_name']
+ self.name = self.module.params['name']
+ self.repo_pct = self.module.params['repo_pct']
+ self.delete_limit = self.module.params['delete_limit']
+ self.full_policy = self.module.params['full_policy']
+ self.rollback_priority = self.module.params['rollback_priority']
+ self.storage_pool_name = self.module.params['storage_pool_name']
+ self.state = self.module.params['state']
+
+ self.url = self.module.params['api_url']
+ self.user = self.module.params['api_username']
+ self.pwd = self.module.params['api_password']
+ self.certs = self.module.params['validate_certs']
+ self.ssid = self.module.params['ssid']
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ self.changed = False
+
+ @property
+ def pool_id(self):
+ pools = 'storage-systems/%s/storage-pools' % self.ssid
+ url = self.url + pools
+ try:
+ (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd)
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Snapshot group module - Failed to fetch storage pools. " +
+ "Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ for pool in data:
+ if pool['name'] == self.storage_pool_name:
+ self.pool_data = pool
+ return pool['id']
+
+ self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.name)
+
+ @property
+ def volume_id(self):
+ volumes = 'storage-systems/%s/volumes' % self.ssid
+ url = self.url + volumes
+ try:
+ rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
+ validate_certs=self.certs)
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Snapshot group module - Failed to fetch volumes. " +
+ "Id [%s]. Error [%s]." % (self.ssid, str(err)))
+ qty = 0
+ for volume in data:
+ if volume['name'] == self.base_volume_name:
+ qty += 1
+
+ if qty > 1:
+ self.module.fail_json(msg="More than one volume with the name: %s was found, "
+ "please ensure your volume has a unique name" % self.base_volume_name)
+ else:
+ Id = volume['id']
+ self.volume = volume
+
+ try:
+ return Id
+ except NameError:
+ self.module.fail_json(msg="No volume with the name: %s, was found" % self.base_volume_name)
+
+ @property
+ def snapshot_group_id(self):
+ url = self.url + 'storage-systems/%s/snapshot-groups' % self.ssid
+ try:
+ rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
+ validate_certs=self.certs)
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Failed to fetch snapshot groups. " +
+ "Id [%s]. Error [%s]." % (self.ssid, str(err)))
+ for ssg in data:
+ if ssg['name'] == self.name:
+ self.ssg_data = ssg
+ return ssg['id']
+
+ return None
+
+ @property
+ def ssg_needs_update(self):
+ if self.ssg_data['fullWarnThreshold'] != self.warning_threshold or \
+ self.ssg_data['autoDeleteLimit'] != self.delete_limit or \
+ self.ssg_data['repFullPolicy'] != self.full_policy or \
+ self.ssg_data['rollbackPriority'] != self.rollback_priority:
+ return True
+ else:
+ return False
+
+ def create_snapshot_group(self):
+ self.post_data = dict(
+ baseMappableObjectId=self.volume_id,
+ name=self.name,
+ repositoryPercentage=self.repo_pct,
+ warningThreshold=self.warning_threshold,
+ autoDeleteLimit=self.delete_limit,
+ fullPolicy=self.full_policy,
+ storagePoolId=self.pool_id,
+ )
+ snapshot = 'storage-systems/%s/snapshot-groups' % self.ssid
+ url = self.url + snapshot
+ try:
+ rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Failed to create snapshot group. " +
+ "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
+ self.ssid,
+ str(err)))
+
+ # snapshot_group_id is a read-only property that re-queries the array,
+ # so the newly created group is picked up on the next access.
+
+ if self.ssg_needs_update:
+ self.update_ssg()
+ else:
+ self.module.exit_json(changed=True, **self.ssg_data)
+
+ def update_ssg(self):
+ self.post_data = dict(
+ warningThreshold=self.warning_threshold,
+ autoDeleteLimit=self.delete_limit,
+ fullPolicy=self.full_policy,
+ rollbackPriority=self.rollback_priority
+ )
+
+ url = self.url + "storage-systems/%s/snapshot-groups/%s" % (self.ssid, self.snapshot_group_id)
+ try:
+ rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Failed to update snapshot group. " +
+ "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
+ self.ssid,
+ str(err)))
+
+ def apply(self):
+ if self.state == 'absent':
+ if self.snapshot_group_id:
+ try:
+ rc, resp = request(
+ self.url + 'storage-systems/%s/snapshot-groups/%s' % (self.ssid, self.snapshot_group_id),
+ method='DELETE', headers=HEADERS, url_password=self.pwd, url_username=self.user,
+ validate_certs=self.certs)
+ except:
+ err = get_exception()
+ self.module.fail_json(msg="Failed to delete snapshot group. " +
+ "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
+ self.ssid,
+ str(err)))
+ self.module.exit_json(changed=True, msg="Snapshot group removed", **self.ssg_data)
+ else:
+ self.module.exit_json(changed=False, msg="Snapshot group absent")
+
+ elif self.snapshot_group_id:
+ if self.ssg_needs_update:
+ self.update_ssg()
+ self.module.exit_json(changed=True, **self.ssg_data)
+ else:
+ self.module.exit_json(changed=False, **self.ssg_data)
+ else:
+ self.create_snapshot_group()
+
+
+def main():
+ vg = SnapshotGroup()
+ vg.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_snapshot_images.py b/storage/netapp/netapp_e_snapshot_images.py
new file mode 100644
index 00000000000..460d1a2a0c1
--- /dev/null
+++ b/storage/netapp/netapp_e_snapshot_images.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_snapshot_images
+short_description: Create and delete snapshot images
+description:
+ - Create and delete snapshot images on snapshot groups for NetApp E-series storage arrays.
+ - Only the oldest snapshot image can be deleted so consistency is preserved.
+ - "Related: Snapshot volumes are created from snapshot images."
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ snapshot_group:
+ description:
+ - The name of the snapshot group in which you want to create a snapshot image.
+ required: True
+ state:
+ description:
+ - Whether a new snapshot image should be created or oldest be deleted.
+ required: True
+ choices: ['create', 'remove']
+"""
+EXAMPLES = """
+ - name: Create Snapshot
+ netapp_e_snapshot_images:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_group: "3300000060080E5000299C24000005B656D9F394"
+ state: 'create'
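+
+ # A removal sketch: state 'remove' deletes the oldest image in the group,
+ # preserving snapshot consistency.
+ - name: Remove Snapshot
+ netapp_e_snapshot_images:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_group: "3300000060080E5000299C24000005B656D9F394"
+ state: 'remove'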
+"""
+RETURN = """
+---
+ changed: true
+ msg: "Created snapshot image"
+ image_id: "3400000060080E5000299B640063074057BC5C5E "
+"""
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def snapshot_group_from_name(module, ssid, api_url, api_pwd, api_usr, name):
+ snap_groups = 'storage-systems/%s/snapshot-groups' % ssid
+ snap_groups_url = api_url + snap_groups
+ (ret, snapshot_groups) = request(snap_groups_url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
+ validate_certs=module.params['validate_certs'])
+
+ snapshot_group_id = None
+ for snapshot_group in snapshot_groups:
+ if name == snapshot_group['label']:
+ snapshot_group_id = snapshot_group['pitGroupRef']
+ break
+ if snapshot_group_id is None:
+ module.fail_json(msg="Failed to lookup snapshot group. Group [%s]. Id [%s]." % (name, ssid))
+
+ return snapshot_group
+
+
+def oldest_image(module, ssid, api_url, api_pwd, api_usr, name):
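+ # Snapshot images are ordered by pitSequenceNumber; the module only ever
+ # removes the minimum (oldest) image so point-in-time consistency is kept.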
+ get_status = 'storage-systems/%s/snapshot-images' % ssid
+ url = api_url + get_status
+
+ try:
+ (ret, images) = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
+ validate_certs=module.params['validate_certs'])
+ except:
+ err = get_exception()
+ module.fail_json(msg="Failed to get snapshot images for group. Group [%s]. Id [%s]. Error [%s]" %
+ (name, ssid, str(err)))
+ if not images:
+ module.exit_json(msg="There are no snapshot images to remove. Group [%s]. Id [%s]." % (name, ssid))
+
+ oldest = min(images, key=lambda x: x['pitSequenceNumber'])
+ if oldest is None or "pitRef" not in oldest:
+ module.fail_json(msg="Failed to lookup oldest snapshot group. Group [%s]. Id [%s]." % (name, ssid))
+
+ return oldest
+
+
+def create_image(module, ssid, api_url, pwd, user, p, snapshot_group):
+ snapshot_group_obj = snapshot_group_from_name(module, ssid, api_url, pwd, user, snapshot_group)
+ snapshot_group_id = snapshot_group_obj['pitGroupRef']
+ endpoint = 'storage-systems/%s/snapshot-images' % ssid
+ url = api_url + endpoint
+ post_data = json.dumps({'groupId': snapshot_group_id})
+
+ image_data = request(url, data=post_data, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
+ validate_certs=module.params['validate_certs'])
+
+ if image_data[1]['status'] == 'optimal':
+ status = True
+ id = image_data[1]['id']
+ else:
+ status = False
+ id = ''
+
+ return status, id
+
+
+def delete_image(module, ssid, api_url, pwd, user, snapshot_group):
+ image = oldest_image(module, ssid, api_url, pwd, user, snapshot_group)
+ image_id = image['pitRef']
+ endpoint = 'storage-systems/%s/snapshot-images/%s' % (ssid, image_id)
+ url = api_url + endpoint
+
+ try:
+ (ret, image_data) = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS,
+ validate_certs=module.params['validate_certs'])
+ except Exception:
+ e = get_exception()
+ # request() raises Exception(resp_code, data) for HTTP errors, so
+ # recover the status code and response body from the exception args
+ ret, image_data = e.args[0], e.args
+
+ if ret == 204:
+ deleted_status = True
+ error_message = ''
+ else:
+ deleted_status = False
+ error_message = image_data[1]['errorMessage']
+
+ return deleted_status, error_message
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ snapshot_group=dict(required=True, type='str'),
+ ssid=dict(required=True, type='str'),
+ api_url=dict(required=True),
+ api_username=dict(required=True),
+ api_password=dict(required=True, no_log=True),
+ validate_certs=dict(required=False, default=True),
+ state=dict(required=True, choices=['create', 'remove'], type='str'),
+ ))
+ module = AnsibleModule(argument_spec)
+
+ p = module.params
+
+ ssid = p.pop('ssid')
+ api_url = p.pop('api_url')
+ user = p.pop('api_username')
+ pwd = p.pop('api_password')
+ snapshot_group = p.pop('snapshot_group')
+ desired_state = p.pop('state')
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ if desired_state == 'create':
+ created_status, snapshot_id = create_image(module, ssid, api_url, pwd, user, p, snapshot_group)
+
+ if created_status:
+ module.exit_json(changed=True, msg='Created snapshot image', image_id=snapshot_id)
+ else:
+ module.fail_json(
+ msg="Could not create snapshot image on system %s, in snapshot group %s" % (ssid, snapshot_group))
+ else:
+ deleted, error_msg = delete_image(module, ssid, api_url, pwd, user, snapshot_group)
+
+ if deleted:
+ module.exit_json(changed=True, msg='Deleted snapshot image for snapshot group [%s]' % (snapshot_group))
+ else:
+ module.fail_json(
+ msg="Could not create snapshot image on system %s, in snapshot group %s --- %s" % (
+ ssid, snapshot_group, error_msg))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_snapshot_volume.py b/storage/netapp/netapp_e_snapshot_volume.py
new file mode 100644
index 00000000000..afc6e340aaf
--- /dev/null
+++ b/storage/netapp/netapp_e_snapshot_volume.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_snapshot_volume
+short_description: Manage E/EF-Series snapshot volumes.
+description:
+ - Create, update, remove snapshot volumes for NetApp E/EF-Series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+notes:
+ - Only I(full_threshold) is supported for update operations. If the snapshot volume already exists and the threshold matches, then an C(ok) status will be returned; no other changes can be made to a pre-existing snapshot volume.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ description:
+ - storage array ID
+ required: True
+ snapshot_image_id:
+ required: True
+ description:
+ - The identifier of the snapshot image used to create the new snapshot volume.
+ - "Note: You'll likely want to use the M(netapp_e_facts) module to find the ID of the image you want."
+ full_threshold:
+ description:
+ - The repository utilization warning threshold percentage
+ default: 85
+ name:
+ required: True
+ description:
+ - The name you wish to give the snapshot volume
+ view_mode:
+ required: False
+ default: readOnly
+ description:
+ - The snapshot volume access mode
+ choices:
+ - modeUnknown
+ - readWrite
+ - readOnly
+ - __UNDEFINED
+ repo_percentage:
+ description:
+ - The size of the view in relation to the size of the base volume
+ default: 20
+ storage_pool_name:
+ description:
+ - Name of the storage pool on which to allocate the repository volume.
+ required: True
+ state:
+ description:
+ - Whether to create or remove the snapshot volume
+ required: True
+ choices:
+ - absent
+ - present
+"""
+EXAMPLES = """
+ - name: Snapshot volume
+ netapp_e_snapshot_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"/
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ state: present
+ storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
+ snapshot_image_id: "{{ snapshot_volume_image_id }}"
+ name: "{{ snapshot_volume_name }}"
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: Json facts for the volume that was created.
+"""
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+class SnapshotVolume(object):
+ def __init__(self):
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ ssid=dict(type='str', required=True),
+ snapshot_image_id=dict(type='str', required=True),
+ full_threshold=dict(type='int', default=85),
+ name=dict(type='str', required=True),
+ view_mode=dict(type='str', default='readOnly',
+ choices=['readOnly', 'readWrite', 'modeUnknown', '__UNDEFINED']),
+ repo_percentage=dict(type='int', default=20),
+ storage_pool_name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'present'])
+ ))
+
+ self.module = AnsibleModule(argument_spec=argument_spec)
+ args = self.module.params
+ self.state = args['state']
+ self.ssid = args['ssid']
+ self.snapshot_image_id = args['snapshot_image_id']
+ self.full_threshold = args['full_threshold']
+ self.name = args['name']
+ self.view_mode = args['view_mode']
+ self.repo_percentage = args['repo_percentage']
+ self.storage_pool_name = args['storage_pool_name']
+ self.url = args['api_url']
+ self.user = args['api_username']
+ self.pwd = args['api_password']
+ self.certs = args['validate_certs']
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ @property
+ def pool_id(self):
+ pools = 'storage-systems/%s/storage-pools' % self.ssid
+ url = self.url + pools
+ (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
+ validate_certs=self.certs)
+
+ for pool in data:
+ if pool['name'] == self.storage_pool_name:
+ self.pool_data = pool
+ return pool['id']
+
+        self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.storage_pool_name)
+
+ @property
+ def ss_vol_exists(self):
+ rc, ss_vols = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, headers=HEADERS,
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
+        if ss_vols:
+            for ss_vol in ss_vols:
+                if ss_vol['name'] == self.name:
+                    self.ss_vol = ss_vol
+                    return True
+
+        return False
+
+    @property
+    def ss_vol_needs_update(self):
+        return self.ss_vol['fullWarnThreshold'] != self.full_threshold
+
+ def create_ss_vol(self):
+ post_data = dict(
+ snapshotImageId=self.snapshot_image_id,
+ fullThreshold=self.full_threshold,
+ name=self.name,
+ viewMode=self.view_mode,
+ repositoryPercentage=self.repo_percentage,
+ repositoryPoolId=self.pool_id
+ )
+
+ rc, create_resp = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid,
+ data=json.dumps(post_data), headers=HEADERS, url_username=self.user,
+ url_password=self.pwd, validate_certs=self.certs, method='POST')
+
+ self.ss_vol = create_resp
+ # Doing a check after creation because the creation call fails to set the specified warning threshold
+ if self.ss_vol_needs_update:
+ self.update_ss_vol()
+ else:
+ self.module.exit_json(changed=True, **create_resp)
+
+ def update_ss_vol(self):
+ post_data = dict(
+ fullThreshold=self.full_threshold,
+ )
+
+ rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
+ data=json.dumps(post_data), headers=HEADERS, url_username=self.user, url_password=self.pwd,
+ method='POST', validate_certs=self.certs)
+
+ self.module.exit_json(changed=True, **resp)
+
+ def remove_ss_vol(self):
+ rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
+ headers=HEADERS, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
+ method='DELETE')
+ self.module.exit_json(changed=True, msg="Volume successfully deleted")
+
+ def apply(self):
+ if self.state == 'present':
+ if self.ss_vol_exists:
+ if self.ss_vol_needs_update:
+ self.update_ss_vol()
+ else:
+ self.module.exit_json(changed=False, **self.ss_vol)
+ else:
+ self.create_ss_vol()
+ else:
+ if self.ss_vol_exists:
+ self.remove_ss_vol()
+ else:
+ self.module.exit_json(changed=False, msg="Volume already absent")
+
+
+def main():
+ sv = SnapshotVolume()
+ sv.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_storage_system.py b/storage/netapp/netapp_e_storage_system.py
new file mode 100644
index 00000000000..64414af6f1e
--- /dev/null
+++ b/storage/netapp/netapp_e_storage_system.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: netapp_e_storage_system
+version_added: "2.2"
+short_description: Add/remove arrays from the Web Services Proxy
+description:
+- Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ required: true
+ description:
+ - The ID of the array to manage. This value must be unique for each array.
+ state:
+ required: true
+ description:
+ - Whether the specified array should be configured on the Web Services Proxy or not.
+ choices: ['present', 'absent']
+ controller_addresses:
+ required: true
+ description:
+        - The list of addresses for the out-of-band management adapter or the agent host. Mutually exclusive with the array_wwn parameter.
+ array_wwn:
+ required: false
+ description:
+        - The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive with the controller_addresses parameter.
+ array_password:
+ required: false
+ description:
+ - The management password of the array to manage, if set.
+ enable_trace:
+ required: false
+ default: false
+ description:
+ - Enable trace logging for SYMbol calls to the storage system.
+ meta_tags:
+ required: false
+ default: None
+ description:
+        - Optional meta tags to associate with this storage system
+    array_status_timeout_sec:
+        required: false
+        default: 60
+        description:
+        - The maximum number of seconds to wait for the newly added array to report its status before failing.
+author: Kevin Hulquest (@hulquest)
+'''
+
+EXAMPLES = '''
+---
+ - name: Presence of storage system
+ netapp_e_storage_system:
+ ssid: "{{ item.key }}"
+ state: present
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ controller_addresses:
+ - "{{ item.value.address1 }}"
+ - "{{ item.value.address2 }}"
+ with_dict: "{{ storage_systems }}"
+ when: check_storage_system
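+
+    # A hypothetical removal sketch mirroring the task above; controller
+    # addresses are only required when state is present
+    - name: Absence of storage system
+      netapp_e_storage_system:
+        ssid: "{{ item.key }}"
+        state: absent
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"
+      with_dict: "{{ storage_systems }}"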
+'''
+
+RETURN = '''
+msg:
+    description: A success message indicating the action taken, e.g. "Storage system added." or "Storage system removed."
+    returned: success
+    type: string
+    sample: 'Storage system removed.'
+'''
+import json
+from datetime import datetime as dt, timedelta
+from time import sleep
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+    except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_body, timeout):
+ (rc, resp) = request(api_url + "/storage-systems", data=request_body, headers=post_headers,
+ method='POST', url_username=api_usr, url_password=api_pwd,
+ validate_certs=validate_certs)
+ status = None
+ return_resp = resp
+ if 'status' in resp:
+ status = resp['status']
+
+ if rc == 201:
+ status = 'neverContacted'
+ fail_after_time = dt.utcnow() + timedelta(seconds=timeout)
+
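+        # poll the proxy until it reports a concrete status for the new array,
+        # or give up once the caller-supplied timeout elapses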
+ while status == 'neverContacted':
+ if dt.utcnow() > fail_after_time:
+ raise Exception("web proxy timed out waiting for array status")
+
+ sleep(1)
+ (rc, system_resp) = request(api_url + "/storage-systems/%s" % ssid,
+ headers=dict(Accept="application/json"), url_username=api_usr,
+ url_password=api_pwd, validate_certs=validate_certs,
+ ignore_errors=True)
+ status = system_resp['status']
+ return_resp = system_resp
+
+ return status, return_resp
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ ssid=dict(required=True, type='str'),
+ controller_addresses=dict(type='list'),
+ array_wwn=dict(required=False, type='str'),
+ array_password=dict(required=False, type='str', no_log=True),
+ array_status_timeout_sec=dict(default=60, type='int'),
+ enable_trace=dict(default=False, type='bool'),
+ meta_tags=dict(type='list')
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['controller_addresses', 'array_wwn']],
+ required_if=[('state', 'present', ['controller_addresses'])]
+ )
+
+ p = module.params
+
+ state = p['state']
+ ssid = p['ssid']
+ controller_addresses = p['controller_addresses']
+ array_wwn = p['array_wwn']
+ array_password = p['array_password']
+ array_status_timeout_sec = p['array_status_timeout_sec']
+ validate_certs = p['validate_certs']
+ meta_tags = p['meta_tags']
+ enable_trace = p['enable_trace']
+
+ api_usr = p['api_username']
+ api_pwd = p['api_password']
+ api_url = p['api_url']
+
+ changed = False
+ array_exists = False
+
+ try:
+ (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, headers=dict(Accept="application/json"),
+ url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs,
+ ignore_errors=True)
+    except Exception:
+ err = get_exception()
+ module.fail_json(msg="Error accessing storage-system with id [%s]. Error [%s]" % (ssid, str(err)))
+
+ array_exists = True
+ array_detail = resp
+
+ if rc == 200:
+ if state == 'absent':
+ changed = True
+ array_exists = False
+ elif state == 'present':
+ current_addresses = frozenset(i for i in (array_detail['ip1'], array_detail['ip2']) if i)
+ if set(controller_addresses) != current_addresses:
+ changed = True
+ if array_detail['wwn'] != array_wwn and array_wwn is not None:
+ module.fail_json(
+ msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' % (ssid, array_detail['wwn']))
+ elif rc == 404:
+ if state == 'present':
+ changed = True
+ array_exists = False
+ else:
+ changed = False
+ module.exit_json(changed=changed, msg="Storage system was not present.")
+
+ if changed and not module.check_mode:
+ if state == 'present':
+ if not array_exists:
+ # add the array
+ array_add_req = dict(
+ id=ssid,
+ controllerAddresses=controller_addresses,
+ metaTags=meta_tags,
+ enableTrace=enable_trace
+ )
+
+ if array_wwn:
+ array_add_req['wwn'] = array_wwn
+
+ if array_password:
+ array_add_req['password'] = array_password
+
+ post_headers = dict(Accept="application/json")
+ post_headers['Content-Type'] = 'application/json'
+ request_data = json.dumps(array_add_req)
+
+ try:
+ (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data,
+ array_status_timeout_sec)
+                except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." %
+ (ssid, request_data, str(err)))
+
+ else: # array exists, modify...
+ post_headers = dict(Accept="application/json")
+ post_headers['Content-Type'] = 'application/json'
+ post_body = dict(
+ controllerAddresses=controller_addresses,
+ removeAllTags=True,
+ enableTrace=enable_trace,
+ metaTags=meta_tags
+ )
+
+ try:
+ (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, post_body,
+ array_status_timeout_sec)
+                except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." %
+ (ssid, post_body, str(err)))
+
+ elif state == 'absent':
+ # delete the array
+ try:
+ (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, method='DELETE',
+ url_username=api_usr,
+ url_password=api_pwd, validate_certs=validate_certs)
+            except Exception:
+ err = get_exception()
+ module.fail_json(msg="Failed to remove storage array. Id[%s]. Error[%s]." % (ssid, str(err)))
+
+ if rc == 422:
+                module.exit_json(changed=changed, msg="Storage system was not present.")
+ if rc == 204:
+ module.exit_json(changed=changed, msg="Storage system removed.")
+
+ module.exit_json(changed=changed, **resp)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_storagepool.py b/storage/netapp/netapp_e_storagepool.py
new file mode 100644
index 00000000000..89309708efd
--- /dev/null
+++ b/storage/netapp/netapp_e_storagepool.py
@@ -0,0 +1,888 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_storagepool
+short_description: Manage disk groups and disk pools
+version_added: '2.2'
+description:
+ - Create or remove disk groups and disk pools for NetApp E-series storage arrays.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ required: true
+ description:
+ - The ID of the array to manage (as configured on the web services proxy).
+ state:
+ required: true
+ description:
+ - Whether the specified storage pool should exist or not.
+ - Note that removing a storage pool currently requires the removal of all defined volumes first.
+ choices: ['present', 'absent']
+ name:
+ required: true
+ description:
+ - The name of the storage pool to manage
+ criteria_drive_count:
+ description:
+      - The number of disks to use for building the storage pool. The pool will be expanded if this number exceeds the number of disks already in place.
+ criteria_drive_type:
+ description:
+      - The type of disk (hdd or ssd) to use when searching for candidate drives.
+ choices: ['hdd','ssd']
+ criteria_size_unit:
+ description:
+ - The unit used to interpret size parameters
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+ criteria_drive_min_size:
+ description:
+ - The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
+ criteria_min_usable_capacity:
+ description:
+      - The minimum size of the storage pool (in size_unit). The pool will be expanded if this value exceeds its current size.
+ criteria_drive_interface_type:
+ description:
+ - The interface type to use when selecting drives for the storage pool (no value means all interface types will be considered)
+ choices: ['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata']
+ criteria_drive_require_fde:
+ description:
+ - Whether full disk encryption ability is required for drives to be added to the storage pool
+ raid_level:
+ required: true
+ choices: ['raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']
+ description:
+ - "Only required when the requested state is 'present'. The RAID level of the storage pool to be created."
+ erase_secured_drives:
+ required: false
+ choices: ['true', 'false']
+ description:
+ - Whether to erase secured disks before adding to storage pool
+ secure_pool:
+ required: false
+ choices: ['true', 'false']
+ description:
+ - Whether to convert to a secure storage pool. Will only work if all drives in the pool are security capable.
+ reserve_drive_count:
+ required: false
+ description:
+      - Set the number of drives reserved by the storage pool for reconstruction operations. Only valid on raid disk pools.
+ remove_volumes:
+ required: false
+ default: False
+ description:
+ - Prior to removing a storage pool, delete all volumes in the pool.
+author: Kevin Hulquest (@hulquest)
+
+'''
+EXAMPLES = '''
+ - name: No disk groups
+ netapp_e_storagepool:
+ ssid: "{{ ssid }}"
+ name: "{{ item }}"
+ state: absent
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
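+
+    # An illustrative creation sketch; the pool name, raid level and drive
+    # count below are assumptions, not recommendations
+    - name: Disk group present
+      netapp_e_storagepool:
+        ssid: "{{ ssid }}"
+        name: ansible_pool
+        state: present
+        raid_level: raid5
+        criteria_drive_count: 3
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"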
+'''
+RETURN = '''
+msg:
+ description: Success message
+ returned: success
+ type: string
+    sample: JSON facts for the pool that was created.
+'''
+
+import json
+import logging
+from traceback import format_exc
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+    except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def select(predicate, iterable):
+ # python 2, 3 generic filtering.
+ if predicate is None:
+ predicate = bool
+ for x in iterable:
+ if predicate(x):
+ yield x
+
+
+class groupby(object):
+ # python 2, 3 generic grouping.
+ def __init__(self, iterable, key=None):
+ if key is None:
+ key = lambda x: x
+ self.keyfunc = key
+ self.it = iter(iterable)
+ self.tgtkey = self.currkey = self.currvalue = object()
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ while self.currkey == self.tgtkey:
+ self.currvalue = next(self.it) # Exit on StopIteration
+ self.currkey = self.keyfunc(self.currvalue)
+ self.tgtkey = self.currkey
+        return (self.currkey, self._grouper(self.tgtkey))
+
+    __next__ = next  # Python 3 iterates via __next__; alias the Python 2 method above
+
+ def _grouper(self, tgtkey):
+ while self.currkey == tgtkey:
+ yield self.currvalue
+ self.currvalue = next(self.it) # Exit on StopIteration
+ self.currkey = self.keyfunc(self.currvalue)
+
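+
+
+# Note: like itertools.groupby, the shim above only groups runs of consecutive
+# equal keys, so unsorted input can yield the same key more than once.
+# An illustrative sketch (names hypothetical):
+#
+#     drives.sort(key=lambda d: int(d['rawCapacity']))
+#     for capacity, same_size in groupby(drives, lambda d: int(d['rawCapacity'])):
+#         candidate_set = list(same_size)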
+
+class NetAppESeriesStoragePool(object):
+ def __init__(self):
+ self._sp_drives_cached = None
+
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ ssid=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ criteria_size_unit=dict(default='gb', type='str'),
+ criteria_drive_count=dict(type='int'),
+ criteria_drive_interface_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
+ type='str'),
+ criteria_drive_type=dict(choices=['ssd', 'hdd'], type='str'),
+ criteria_drive_min_size=dict(type='int'),
+ criteria_drive_require_fde=dict(type='bool'),
+ criteria_min_usable_capacity=dict(type='int'),
+ raid_level=dict(
+ choices=['raidUnsupported', 'raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']),
+ erase_secured_drives=dict(type='bool'),
+ log_path=dict(type='str'),
+ remove_drives=dict(type='list'),
+ secure_pool=dict(type='bool', default=False),
+ reserve_drive_count=dict(type='int'),
+ remove_volumes=dict(type='bool', default=False)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['raid_level'])
+ ],
+ mutually_exclusive=[
+
+ ],
+ # TODO: update validation for various selection criteria
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ log_path = p['log_path']
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+ self.debug = self._logger.debug
+
+ if log_path:
+ logging.basicConfig(level=logging.DEBUG, filename=log_path)
+
+ self.state = p['state']
+ self.ssid = p['ssid']
+ self.name = p['name']
+ self.validate_certs = p['validate_certs']
+
+ self.criteria_drive_count = p['criteria_drive_count']
+ self.criteria_drive_type = p['criteria_drive_type']
+ self.criteria_size_unit = p['criteria_size_unit']
+ self.criteria_drive_min_size = p['criteria_drive_min_size']
+ self.criteria_min_usable_capacity = p['criteria_min_usable_capacity']
+ self.criteria_drive_interface_type = p['criteria_drive_interface_type']
+ self.criteria_drive_require_fde = p['criteria_drive_require_fde']
+
+ self.raid_level = p['raid_level']
+ self.erase_secured_drives = p['erase_secured_drives']
+ self.remove_drives = p['remove_drives']
+ self.secure_pool = p['secure_pool']
+ self.reserve_drive_count = p['reserve_drive_count']
+ self.remove_volumes = p['remove_volumes']
+
+ try:
+ self.api_usr = p['api_username']
+ self.api_pwd = p['api_password']
+ self.api_url = p['api_url']
+ except KeyError:
+ self.module.fail_json(msg="You must pass in api_username "
+ "and api_password and api_url to the module.")
+
+ self.post_headers = dict(Accept="application/json")
+ self.post_headers['Content-Type'] = 'application/json'
+
+ # Quick and dirty drive selector, since the one provided by web service proxy is broken for min_disk_size as of 2016-03-12.
+ # Doesn't really need to be a class once this is in module_utils or retired- just groups everything together so we
+ # can copy/paste to other modules more easily.
+ # Filters all disks by specified criteria, then groups remaining disks by capacity, interface and disk type, and selects
+ # the first set that matches the specified count and/or aggregate capacity.
+ # class DriveSelector(object):
+ def filter_drives(
+ self,
+ drives, # raw drives resp
+ interface_type=None, # sas, sata, fibre, etc
+ drive_type=None, # ssd/hdd
+ spindle_speed=None, # 7200, 10000, 15000, ssd (=0)
+ min_drive_size=None,
+ max_drive_size=None,
+ fde_required=None,
+ size_unit='gb',
+ min_total_capacity=None,
+ min_drive_count=None,
+ exact_drive_count=None,
+ raid_level=None
+ ):
+ if min_total_capacity is None and exact_drive_count is None:
+            raise Exception("One of criteria_min_usable_capacity or criteria_drive_count must be specified.")
+
+ if min_total_capacity:
+ min_total_capacity = min_total_capacity * self._size_unit_map[size_unit]
+
+ # filter clearly invalid/unavailable drives first
+ drives = select(lambda d: self._is_valid_drive(d), drives)
+
+ if interface_type:
+ drives = select(lambda d: d['phyDriveType'] == interface_type, drives)
+
+ if drive_type:
+ drives = select(lambda d: d['driveMediaType'] == drive_type, drives)
+
+ if spindle_speed is not None: # 0 is valid for ssds
+ drives = select(lambda d: d['spindleSpeed'] == spindle_speed, drives)
+
+ if min_drive_size:
+ min_drive_size_bytes = min_drive_size * self._size_unit_map[size_unit]
+ drives = select(lambda d: int(d['rawCapacity']) >= min_drive_size_bytes, drives)
+
+ if max_drive_size:
+ max_drive_size_bytes = max_drive_size * self._size_unit_map[size_unit]
+ drives = select(lambda d: int(d['rawCapacity']) <= max_drive_size_bytes, drives)
+
+ if fde_required:
+ drives = select(lambda d: d['fdeCapable'], drives)
+
+ # initial implementation doesn't have a preference for any of these values...
+ # just return the first set we find that matches the requested disk count and/or minimum total capacity
+ for (cur_capacity, drives_by_capacity) in groupby(drives, lambda d: int(d['rawCapacity'])):
+ for (cur_interface_type, drives_by_interface_type) in groupby(drives_by_capacity,
+ lambda d: d['phyDriveType']):
+ for (cur_drive_type, drives_by_drive_type) in groupby(drives_by_interface_type,
+ lambda d: d['driveMediaType']):
+ # listify so we can consume more than once
+ drives_by_drive_type = list(drives_by_drive_type)
+ candidate_set = list() # reset candidate list on each iteration of the innermost loop
+
+ if exact_drive_count:
+ if len(drives_by_drive_type) < exact_drive_count:
+ continue # we know this set is too small, move on
+
+ for drive in drives_by_drive_type:
+ candidate_set.append(drive)
+ if self._candidate_set_passes(candidate_set, min_capacity_bytes=min_total_capacity,
+ min_drive_count=min_drive_count,
+ exact_drive_count=exact_drive_count, raid_level=raid_level):
+ return candidate_set
+
+ raise Exception("couldn't find an available set of disks to match specified criteria")
+
+ def _is_valid_drive(self, d):
+ is_valid = d['available'] \
+ and d['status'] == 'optimal' \
+ and not d['pfa'] \
+ and not d['removed'] \
+ and not d['uncertified'] \
+ and not d['invalidDriveData'] \
+ and not d['nonRedundantAccess']
+
+ return is_valid
+
+ def _candidate_set_passes(self, candidate_set, min_capacity_bytes=None, min_drive_count=None,
+ exact_drive_count=None, raid_level=None):
+ if not self._is_drive_count_valid(len(candidate_set), min_drive_count=min_drive_count,
+ exact_drive_count=exact_drive_count, raid_level=raid_level):
+ return False
+ # TODO: this assumes candidate_set is all the same size- if we want to allow wastage, need to update to use min size of set
+ if min_capacity_bytes is not None and self._calculate_usable_capacity(int(candidate_set[0]['rawCapacity']),
+ len(candidate_set),
+ raid_level=raid_level) < min_capacity_bytes:
+ return False
+
+ return True
+
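+    # Rough usable-capacity model per RAID level. For example, six 4 TB disks
+    # yield 24 TB at raid0, 12 TB at raid1, 20 TB at raid5, and 16 TB at raid6
+    # or raidDiskPool under this arithmetic (illustrative figures only).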
+ def _calculate_usable_capacity(self, disk_size_bytes, disk_count, raid_level=None):
+ if raid_level in [None, 'raid0']:
+ return disk_size_bytes * disk_count
+ if raid_level == 'raid1':
+            return (disk_size_bytes * disk_count) // 2
+ if raid_level in ['raid3', 'raid5']:
+ return (disk_size_bytes * disk_count) - disk_size_bytes
+ if raid_level in ['raid6', 'raidDiskPool']:
+ return (disk_size_bytes * disk_count) - (disk_size_bytes * 2)
+ raise Exception("unsupported raid_level: %s" % raid_level)
+
+ def _is_drive_count_valid(self, drive_count, min_drive_count=0, exact_drive_count=None, raid_level=None):
+ if exact_drive_count and exact_drive_count != drive_count:
+ return False
+ if raid_level == 'raidDiskPool':
+ if drive_count < 11:
+ return False
+ if raid_level == 'raid1':
+ if drive_count % 2 != 0:
+ return False
+ if raid_level in ['raid3', 'raid5']:
+ if drive_count < 3:
+ return False
+ if raid_level == 'raid6':
+ if drive_count < 4:
+ return False
+ if min_drive_count and drive_count < min_drive_count:
+ return False
+
+ return True
+
+ def get_storage_pool(self, storage_pool_name):
+ # global ifilter
+ self.debug("fetching storage pools")
+ # map the storage pool name to its id
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
+ headers=dict(Accept="application/json"), url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs)
+ except Exception:
+ err = get_exception()
+ rc = err.args[0]
+ if rc == 404 and self.state == 'absent':
+ self.module.exit_json(
+ msg="Storage pool [%s] did not exist." % (self.name))
+ else:
+                self.module.fail_json(
+ msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]. RC[%s]." %
+ (self.ssid, str(err), self.state, rc))
+
+ self.debug("searching for storage pool '%s'" % storage_pool_name)
+
+ pool_detail = next(select(lambda a: a['name'] == storage_pool_name, resp), None)
+
+ if pool_detail:
+ found = 'found'
+ else:
+ found = 'not found'
+ self.debug(found)
+
+ return pool_detail
+
+ def get_candidate_disks(self):
+ self.debug("getting candidate disks...")
+
+ # driveCapacityMin is broken on /drives POST. Per NetApp request we built our own
+ # switch back to commented code below if it gets fixed
+ # drives_req = dict(
+ # driveCount = self.criteria_drive_count,
+ # sizeUnit = 'mb',
+ # raidLevel = self.raid_level
+ # )
+ #
+ # if self.criteria_drive_type:
+ # drives_req['driveType'] = self.criteria_drive_type
+ # if self.criteria_disk_min_aggregate_size_mb:
+ # drives_req['targetUsableCapacity'] = self.criteria_disk_min_aggregate_size_mb
+ #
+ # # TODO: this arg appears to be ignored, uncomment if it isn't
+ # #if self.criteria_disk_min_size_gb:
+ # # drives_req['driveCapacityMin'] = self.criteria_disk_min_size_gb * 1024
+ # (rc,drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), data=json.dumps(drives_req), headers=self.post_headers, method='POST', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs)
+ #
+ # if rc == 204:
+ # self.module.fail_json(msg='Cannot find disks to match requested criteria for storage pool')
+
+ # disk_ids = [d['id'] for d in drives_resp]
+
+ try:
+ (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs)
+        except Exception:
+            err = get_exception()
+            self.module.fail_json(
+ msg="Failed to fetch disk drives. Array id [%s]. Error[%s]." % (self.ssid, str(err)))
+
+ try:
+ candidate_set = self.filter_drives(drives_resp,
+ exact_drive_count=self.criteria_drive_count,
+ drive_type=self.criteria_drive_type,
+ min_drive_size=self.criteria_drive_min_size,
+ raid_level=self.raid_level,
+ size_unit=self.criteria_size_unit,
+ min_total_capacity=self.criteria_min_usable_capacity,
+ interface_type=self.criteria_drive_interface_type,
+ fde_required=self.criteria_drive_require_fde
+ )
+        except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to allocate adequate drive count. Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+ disk_ids = [d['id'] for d in candidate_set]
+
+ return disk_ids
+
+ def create_storage_pool(self):
+ self.debug("creating storage pool...")
+
+ sp_add_req = dict(
+ raidLevel=self.raid_level,
+ diskDriveIds=self.disk_ids,
+ name=self.name
+ )
+
+ if self.erase_secured_drives:
+ sp_add_req['eraseSecuredDrives'] = self.erase_secured_drives
+
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
+ data=json.dumps(sp_add_req), headers=self.post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs,
+ timeout=120)
+        except Exception:
+            err = get_exception()
+            self.module.fail_json(
+                msg="Failed to create storage pool. Pool name [%s]. Array id [%s]. Error[%s]." % (self.name,
+                                                                                                  self.ssid,
+                                                                                                  str(err)))
+
+ self.pool_detail = self.get_storage_pool(self.name)
+
+ if self.secure_pool:
+ secure_pool_data = dict(securePool=True)
+ try:
+ (retc, r) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']),
+ data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST',
+ url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120, ignore_errors=True)
+            except Exception:
+                err = get_exception()
+                pool_id = self.pool_detail['id']
+                self.module.fail_json(
+ msg="Failed to update storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
+ self.ssid,
+ str(err)))
+
+ @property
+ def needs_raid_level_migration(self):
+ current_raid_level = self.pool_detail['raidLevel']
+ needs_migration = self.raid_level != current_raid_level
+
+ if needs_migration: # sanity check some things so we can fail early/check-mode
+ if current_raid_level == 'raidDiskPool':
+ self.module.fail_json(msg="raid level cannot be changed for disk pools")
+
+ return needs_migration
+
+ def migrate_raid_level(self):
+ self.debug("migrating storage pool to raid level '%s'..." % self.raid_level)
+ sp_raid_migrate_req = dict(
+ raidLevel=self.raid_level
+ )
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s/raid-type-migration" % (self.ssid,
+ self.name),
+ data=json.dumps(sp_raid_migrate_req), headers=self.post_headers, method='POST',
+ url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
+        except Exception:
+            err = get_exception()
+            pool_id = self.pool_detail['id']
+            self.module.fail_json(
+ msg="Failed to change the raid level of storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
+ pool_id, self.ssid, str(err)))
+
+    @property
+    def sp_drives(self):
+        # a property getter cannot take arguments; hot spares are always excluded
+        exclude_hotspares = True
+ if not self._sp_drives_cached:
+
+ self.debug("fetching drive list...")
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs)
+            except Exception:
+                err = get_exception()
+                pool_id = self.pool_detail['id']
+                self.module.fail_json(
+ msg="Failed to fetch disk drives. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id, self.ssid, str(err)))
+
+ sp_id = self.pool_detail['id']
+ if exclude_hotspares:
+ self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id and not d['hotSpare']]
+ else:
+ self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id]
+
+ return self._sp_drives_cached
+
+ @property
+ def reserved_drive_count_differs(self):
+ if int(self.pool_detail['volumeGroupData']['diskPoolData'][
+ 'reconstructionReservedDriveCount']) != self.reserve_drive_count:
+ return True
+ return False
+
+ @property
+ def needs_expansion(self):
+        if self.criteria_drive_count and self.criteria_drive_count > len(self.sp_drives):
+ return True
+ # TODO: is totalRaidedSpace the best attribute for "how big is this SP"?
+ if self.criteria_min_usable_capacity and \
+ (self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]) > int(self.pool_detail['totalRaidedSpace']):
+ return True
+
+ return False
+
+ def get_expansion_candidate_drives(self):
+ # sanity checks; don't call this if we can't/don't need to expand
+ if not self.needs_expansion:
+ self.module.fail_json(msg="can't get expansion candidates when pool doesn't need expansion")
+
+ self.debug("fetching expansion candidate drives...")
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
+ self.pool_detail['id']),
+ method='GET', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
+ timeout=120)
+        except Exception:
+            err = get_exception()
+            pool_id = self.pool_detail['id']
+            self.module.fail_json(
+ msg="Failed to fetch candidate drives for storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
+ pool_id, self.ssid, str(err)))
+
+ current_drive_count = len(self.sp_drives)
+ current_capacity_bytes = int(self.pool_detail['totalRaidedSpace']) # TODO: is this the right attribute to use?
+
+ if self.criteria_min_usable_capacity:
+ requested_capacity_bytes = self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]
+ else:
+ requested_capacity_bytes = current_capacity_bytes
+
+ if self.criteria_drive_count:
+ minimum_disks_to_add = max((self.criteria_drive_count - current_drive_count), 1)
+ else:
+ minimum_disks_to_add = 1
+
+ minimum_bytes_to_add = max(requested_capacity_bytes - current_capacity_bytes, 0)
+
+ # FUTURE: allow more control over expansion candidate selection?
+ # loop over candidate disk sets and add until we've met both criteria
+
+ added_drive_count = 0
+ added_capacity_bytes = 0
+
+ drives_to_add = set()
+
+ for s in resp:
+ # don't trust the API not to give us duplicate drives across candidate sets, especially in multi-drive sets
+ candidate_drives = s['drives']
+ if len(drives_to_add.intersection(candidate_drives)) != 0:
+ # duplicate, skip
+ continue
+ drives_to_add.update(candidate_drives)
+ added_drive_count += len(candidate_drives)
+ added_capacity_bytes += int(s['usableCapacity'])
+
+ if added_drive_count >= minimum_disks_to_add and added_capacity_bytes >= minimum_bytes_to_add:
+ break
+
+ if (added_drive_count < minimum_disks_to_add) or (added_capacity_bytes < minimum_bytes_to_add):
+ self.module.fail_json(
+ msg="unable to find at least %s drives to add that would add at least %s bytes of capacity" % (
+ minimum_disks_to_add, minimum_bytes_to_add))
+
+ return list(drives_to_add)
+
+ def expand_storage_pool(self):
+ drives_to_add = self.get_expansion_candidate_drives()
+
+ self.debug("adding %s drives to storage pool..." % len(drives_to_add))
+ sp_expand_req = dict(
+ drives=drives_to_add
+ )
+ try:
+ request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
+ self.pool_detail['id']),
+ data=json.dumps(sp_expand_req), headers=self.post_headers, method='POST', url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
+        except Exception:
+            err = get_exception()
+            pool_id = self.pool_detail['id']
+            self.module.fail_json(
+                msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
+                                                                                                       self.ssid,
+                                                                                                       str(err)))
+
+ # TODO: check response
+ # TODO: support blocking wait?
+
+    def reduce_drives(self, drive_list):
+        # every drive requested for removal must currently be a member of the pool
+        # (drive_list is assumed to contain drive ids, as consumed by the reduction API)
+        pool_drive_ids = [drive['id'] for drive in self.sp_drives]
+        if not all(drive in pool_drive_ids for drive in drive_list):
+            self.module.fail_json(
+                msg="One of the drives you wish to remove does not currently exist in the storage pool you specified")
+
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s/reduction" % (self.ssid,
+ self.pool_detail['id']),
+ data=json.dumps(drive_list), headers=self.post_headers, method='POST', url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
+        except Exception:
+            err = get_exception()
+            pool_id = self.pool_detail['id']
+            self.module.fail_json(
+ msg="Failed to remove drives from storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
+ pool_id, self.ssid, str(err)))
+
+ def update_reserve_drive_count(self, qty):
+ data = dict(reservedDriveCount=qty)
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']),
+ data=json.dumps(data), headers=self.post_headers, method='POST', url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
+        except Exception:
+            err = get_exception()
+            pool_id = self.pool_detail['id']
+            self.module.fail_json(
+                msg="Failed to update reserve drive count. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
+                                                                                                       self.ssid,
+                                                                                                       str(err)))
+
+ def apply(self):
+ changed = False
+ pool_exists = False
+
+ self.pool_detail = self.get_storage_pool(self.name)
+
+ if self.pool_detail:
+ pool_exists = True
+ pool_id = self.pool_detail['id']
+
+ if self.state == 'absent':
+ self.debug("CHANGED: storage pool exists, but requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ # sanity checks first- we can't change these, so we'll bomb if they're specified
+ if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail['driveMediaType']:
+ self.module.fail_json(
+ msg="drive media type %s cannot be changed to %s" % (self.pool_detail['driveMediaType'],
+ self.criteria_drive_type))
+
+ # now the things we can change...
+ if self.needs_expansion:
+ self.debug("CHANGED: storage pool needs expansion")
+ changed = True
+
+ if self.needs_raid_level_migration:
+ self.debug(
+ "CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'" % (
+ self.pool_detail['raidLevel'], self.raid_level))
+ changed = True
+
+ # if self.reserved_drive_count_differs:
+ # changed = True
+
+ # TODO: validate other state details? (pool priority, alert threshold)
+
+ # per FPoole and others, pool reduce operations will not be supported. Automatic "smart" reduction
+ # presents a difficult parameter issue, as the disk count can increase due to expansion, so we
+ # can't just use disk count > criteria_drive_count.
+
+ else: # pool does not exist
+ if self.state == 'present':
+ self.debug("CHANGED: storage pool does not exist, but requested state is 'present'")
+ changed = True
+
+ # ensure we can get back a workable set of disks
+ # (doing this early so candidate selection runs under check mode)
+ self.disk_ids = self.get_candidate_disks()
+ else:
+                self.module.exit_json(changed=False, msg="Storage pool [%s] did not exist." % (self.name))
+
+ if changed and not self.module.check_mode:
+ # apply changes
+ if self.state == 'present':
+ if not pool_exists:
+ self.create_storage_pool()
+ else: # pool exists but differs, modify...
+ if self.needs_expansion:
+ self.expand_storage_pool()
+
+ if self.remove_drives:
+ self.reduce_drives(self.remove_drives)
+
+ if self.needs_raid_level_migration:
+ self.migrate_raid_level()
+
+ # if self.reserved_drive_count_differs:
+ # self.update_reserve_drive_count(self.reserve_drive_count)
+
+ if self.secure_pool:
+ secure_pool_data = dict(securePool=True)
+ try:
+ (retc, r) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid,
+ self.pool_detail[
+ 'id']),
+ data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs, timeout=120, ignore_errors=True)
+                    except Exception:
+                        err = get_exception()
+                        self.module.fail_json(
+                            msg="Failed to secure storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
+                                self.pool_detail['id'], self.ssid, str(err)))
+
+ if int(retc) == 422:
+ self.module.fail_json(
+ msg="Error in enabling secure pool. One of the drives in the specified storage pool is likely not security capable")
+
+ elif self.state == 'absent':
+ # delete the storage pool
+ try:
+ remove_vol_opt = ''
+ if self.remove_volumes:
+ remove_vol_opt = '?delete-volumes=true'
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/storage-pools/%s%s" % (self.ssid, pool_id,
+ remove_vol_opt),
+ method='DELETE',
+ url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
+ timeout=120)
+            except Exception:
+                err = get_exception()
+                self.module.fail_json(
+ msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
+ self.ssid,
+ str(err)))
+
+ self.module.exit_json(changed=changed, **self.pool_detail)
+
+
+def main():
+ sp = NetAppESeriesStoragePool()
+ try:
+ sp.apply()
+ except Exception:
+ e = get_exception()
+ sp.debug("Exception in apply(): \n%s" % format_exc(e))
+ raise
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_volume.py b/storage/netapp/netapp_e_volume.py
new file mode 100644
index 00000000000..26107965855
--- /dev/null
+++ b/storage/netapp/netapp_e_volume.py
@@ -0,0 +1,622 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+from ansible.module_utils.api import basic_auth_argument_spec
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_volume
+version_added: "2.2"
+short_description: Manage storage volumes (standard and thin)
+description:
+ - Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ ssid:
+ required: true
+ description:
+ - The ID of the array to manage (as configured on the web services proxy).
+ state:
+ required: true
+ description:
+ - Whether the specified volume should exist or not.
+ choices: ['present', 'absent']
+ name:
+ required: true
+ description:
+ - The name of the volume to manage
+ storage_pool_name:
+ required: true
+ description:
+ - "Required only when requested state is 'present'. The name of the storage pool the volume should exist on."
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+ size:
+ required: true
+ description:
+ - "Required only when state = 'present'. The size of the volume in (size_unit)."
+ segment_size_kb:
+ description:
+ - The segment size of the new volume
+    default: 128
+ thin_provision:
+ description:
+ - Whether the volume should be thin provisioned. Thin volumes can only be created on disk pools (raidDiskPool).
+ default: False
+ choices: ['yes','no','true','false']
+ thin_volume_repo_size:
+ description:
+      - Initial size of the thin volume repository volume (in size_unit). Required when C(thin_provision) is true.
+    required: false
+ thin_volume_max_repo_size:
+ description:
+ - Maximum size that the thin volume repository volume will automatically expand to
+ default: same as size (in size_unit)
+ ssd_cache_enabled:
+ description:
+ - Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined)
+ default: None (ignores existing SSD cache setting)
+ choices: ['yes','no','true','false']
+ data_assurance_enabled:
+ description:
+ - If data assurance should be enabled for the volume
+ default: false
+
+# TODO: doc thin volume parameters
+
+author: Kevin Hulquest (@hulquest)
+
+'''
+EXAMPLES = '''
+ - name: No thin volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ name: NewThinVolumeByAnsible
+ state: absent
+ log_path: /tmp/volume.log
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ when: check_volume
+
+
+ - name: No fat volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ name: NewVolumeByAnsible
+ state: absent
+ log_path: /tmp/volume.log
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ when: check_volume
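+
+    # An illustrative creation sketch; the pool name and size are assumptions
+    - name: Make fat volume
+      netapp_e_volume:
+        ssid: "{{ ssid }}"
+        name: NewVolumeByAnsible
+        state: present
+        storage_pool_name: ansible_pool
+        size: 10
+        size_unit: gb
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"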
+'''
+RETURN = '''
+msg:
+    description: A message describing the action taken, e.g. "Standard volume [workload_vol_1] has been created.",
+        "Thin volume [workload_thin_vol] has been created.", "Volume [workload_vol_1] has been expanded.",
+        "Volume [workload_vol_1] has been deleted.", "Volume [workload_vol_1] did not exist." or
+        "Volume [workload_vol_1] already exists."
+    returned: always
+    type: string
+    sample: 'Standard volume [workload_vol_1] has been created.'
+'''
+
+import json
+import logging
+import time
+from traceback import format_exc
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+            raw_data = None
+    except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def ifilter(predicate, iterable):
+ # python 2, 3 generic filtering.
+ if predicate is None:
+ predicate = bool
+ for x in iterable:
+ if predicate(x):
+ yield x
+
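+# Illustrative use of the shim above (names hypothetical):
+#
+#     match = next(ifilter(lambda v: v['name'] == wanted_name, volumes), None)
+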
+
+class NetAppESeriesVolume(object):
+ def __init__(self):
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ self._post_headers = dict(Accept="application/json")
+ self._post_headers['Content-Type'] = 'application/json'
+
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ ssid=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ storage_pool_name=dict(type='str'),
+ size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
+ type='str'),
+ size=dict(type='int'),
+ segment_size_kb=dict(default=128, choices=[8, 16, 32, 64, 128, 256, 512], type='int'),
+ ssd_cache_enabled=dict(type='bool'), # no default, leave existing setting alone
+ data_assurance_enabled=dict(default=False, type='bool'),
+ thin_provision=dict(default=False, type='bool'),
+ thin_volume_repo_size=dict(type='int'),
+ thin_volume_max_repo_size=dict(type='int'),
+ # TODO: add cache, owning controller support, thin expansion policy, etc
+ log_path=dict(type='str'),
+ api_url=dict(type='str'),
+ api_username=dict(type='str'),
+ api_password=dict(type='str'),
+ validate_certs=dict(type='bool'),
+ ))
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['storage_pool_name', 'size']),
+                                        ('thin_provision', True, ['thin_volume_repo_size'])
+ ],
+ supports_check_mode=True)
+ p = self.module.params
+
+ log_path = p['log_path']
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+ self.debug = self._logger.debug
+
+ if log_path:
+ logging.basicConfig(level=logging.DEBUG, filename=log_path)
+
+ self.state = p['state']
+ self.ssid = p['ssid']
+ self.name = p['name']
+ self.storage_pool_name = p['storage_pool_name']
+ self.size_unit = p['size_unit']
+ self.size = p['size']
+ self.segment_size_kb = p['segment_size_kb']
+ self.ssd_cache_enabled = p['ssd_cache_enabled']
+ self.data_assurance_enabled = p['data_assurance_enabled']
+ self.thin_provision = p['thin_provision']
+ self.thin_volume_repo_size = p['thin_volume_repo_size']
+ self.thin_volume_max_repo_size = p['thin_volume_max_repo_size']
+
+ if not self.thin_volume_max_repo_size:
+ self.thin_volume_max_repo_size = self.size
+
+ self.validate_certs = p['validate_certs']
+
+ try:
+ self.api_usr = p['api_username']
+ self.api_pwd = p['api_password']
+ self.api_url = p['api_url']
+ except KeyError:
+ self.module.fail_json(msg="You must pass in api_username "
+ "and api_password and api_url to the module.")
+
+ def get_volume(self, volume_name):
+ self.debug('fetching volumes')
+ # fetch the list of volume objects and look for one with a matching name (we'll need to merge volumes and thin-volumes)
+ try:
+ (rc, volumes) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
+ headers=dict(Accept="application/json"), url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to obtain list of standard/thick volumes. Array Id [%s]. Error[%s]." % (self.ssid,
+ str(err)))
+
+ try:
+ self.debug('fetching thin-volumes')
+ (rc, thinvols) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
+ headers=dict(Accept="application/json"), url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]." % (self.ssid, str(err)))
+
+ volumes.extend(thinvols)
+
+ self.debug("searching for volume '%s'" % volume_name)
+ volume_detail = next(ifilter(lambda a: a['name'] == volume_name, volumes), None)
+
+ if volume_detail:
+ self.debug('found')
+ else:
+ self.debug('not found')
+
+ return volume_detail
+
+ def get_storage_pool(self, storage_pool_name):
+ self.debug("fetching storage pools")
+ # map the storage pool name to its id
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
+ headers=dict(Accept="application/json"), url_username=self.api_usr,
+ url_password=self.api_pwd, validate_certs=self.validate_certs)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]." % (self.ssid, str(err)))
+
+ self.debug("searching for storage pool '%s'" % storage_pool_name)
+ pool_detail = next(ifilter(lambda a: a['name'] == storage_pool_name, resp), None)
+
+ if pool_detail:
+ self.debug('found')
+ else:
+ self.debug('not found')
+
+ return pool_detail
+
+ def create_volume(self, pool_id, name, size_unit, size, segment_size_kb, data_assurance_enabled):
+ volume_add_req = dict(
+ name=name,
+ poolId=pool_id,
+ sizeUnit=size_unit,
+ size=size,
+ segSize=segment_size_kb,
+ dataAssuranceEnabled=data_assurance_enabled,
+ )
+
+ self.debug("creating volume '%s'" % name)
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
+ data=json.dumps(volume_add_req), headers=self._post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs,
+ timeout=120)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid,
+ str(err)))
+
+ def create_thin_volume(self, pool_id, name, size_unit, size, thin_volume_repo_size,
+ thin_volume_max_repo_size, data_assurance_enabled):
+ thin_volume_add_req = dict(
+ name=name,
+ poolId=pool_id,
+ sizeUnit=size_unit,
+ virtualSize=size,
+ repositorySize=thin_volume_repo_size,
+ maximumRepositorySize=thin_volume_max_repo_size,
+ dataAssuranceEnabled=data_assurance_enabled,
+ )
+
+ self.debug("creating thin-volume '%s'" % name)
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
+ data=json.dumps(thin_volume_add_req), headers=self._post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs,
+ timeout=120)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
+ self.ssid,
+ str(err)))
+
+ def delete_volume(self):
+ # delete the volume
+ self.debug("deleting volume '%s'" % self.volume_detail['name'])
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/%s/%s" % (self.ssid, self.volume_resource_name,
+ self.volume_detail['id']),
+ method='DELETE', url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs, timeout=120)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid,
+ str(err)))
+
+ @property
+ def volume_resource_name(self):
+ if self.volume_detail['thinProvisioned']:
+ return 'thin-volumes'
+ else:
+ return 'volumes'
+
+ @property
+ def volume_properties_changed(self):
+ return self.volume_ssdcache_setting_changed # or with other props here when extended
+
+ # TODO: add support for r/w cache settings, owning controller, scan settings, expansion policy, growth alert threshold
+
+ @property
+ def volume_ssdcache_setting_changed(self):
+ # None means ignore existing setting
+        if self.ssd_cache_enabled is not None and self.ssd_cache_enabled != self.volume_detail['flashCached']:
+            self.debug("flash cache setting changed")
+            return True
+        return False
+
+ def update_volume_properties(self):
+ update_volume_req = dict()
+
+ # conditionally add values so we ignore unspecified props
+ if self.volume_ssdcache_setting_changed:
+ update_volume_req['flashCache'] = self.ssd_cache_enabled
+
+ self.debug("updating volume properties...")
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/%s/%s/" % (self.ssid, self.volume_resource_name,
+ self.volume_detail['id']),
+ data=json.dumps(update_volume_req), headers=self._post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
+ timeout=120)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to update volume properties. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
+ self.ssid,
+ str(err)))
+
+ @property
+ def volume_needs_expansion(self):
+ current_size_bytes = int(self.volume_detail['capacity'])
+ requested_size_bytes = self.size * self._size_unit_map[self.size_unit]
+
+ # TODO: check requested/current repo volume size for thin-volumes as well
+
+ # TODO: do we need to build any kind of slop factor in here?
+ return requested_size_bytes > current_size_bytes
+
+ def expand_volume(self):
+ is_thin = self.volume_detail['thinProvisioned']
+ if is_thin:
+ # TODO: support manual repo expansion as well
+ self.debug('expanding thin volume')
+ thin_volume_expand_req = dict(
+ newVirtualSize=self.size,
+ sizeUnit=self.size_unit
+ )
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes/%s/expand" % (self.ssid,
+ self.volume_detail[
+ 'id']),
+ data=json.dumps(thin_volume_expand_req), headers=self._post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs, timeout=120)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
+ self.ssid,
+ str(err)))
+
+ # TODO: check return code
+ else:
+ self.debug('expanding volume')
+ volume_expand_req = dict(
+ expansionSize=self.size,
+ sizeUnit=self.size_unit
+ )
+ try:
+ (rc, resp) = request(
+ self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
+ self.volume_detail['id']),
+ data=json.dumps(volume_expand_req), headers=self._post_headers, method='POST',
+ url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
+ timeout=120)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
+ self.ssid,
+ str(err)))
+
+ self.debug('polling for completion...')
+
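+        # The expand-job status endpoint is polled below; the response is
+        # expected to carry 'action' and 'percentComplete', with 'action'
+        # reported as 'none' once the expansion has finished.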
+ while True:
+ try:
+ (rc, resp) = request(self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
+ self.volume_detail[
+ 'id']),
+ method='GET', url_username=self.api_usr, url_password=self.api_pwd,
+ validate_certs=self.validate_certs)
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(
+ msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]. Error[%s]." % (
+ self.name, self.ssid, str(err)))
+
+ action = resp['action']
+ percent_complete = resp['percentComplete']
+
+ self.debug('expand action %s, %s complete...' % (action, percent_complete))
+
+ if action == 'none':
+ self.debug('expand complete')
+ break
+ else:
+ time.sleep(5)
+
+ def apply(self):
+ changed = False
+ volume_exists = False
+ msg = None
+
+ self.volume_detail = self.get_volume(self.name)
+
+ if self.volume_detail:
+ volume_exists = True
+
+ if self.state == 'absent':
+ self.debug("CHANGED: volume exists, but requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ # check requested volume size, see if expansion is necessary
+ if self.volume_needs_expansion:
+ self.debug(
+ "CHANGED: requested volume size %s%s is larger than current size %sb" % (self.size,
+ self.size_unit,
+ self.volume_detail[
+ 'capacity']))
+ changed = True
+
+ if self.volume_properties_changed:
+ self.debug("CHANGED: one or more volume properties have changed")
+ changed = True
+
+ else:
+ if self.state == 'present':
+ self.debug("CHANGED: volume does not exist, but requested state is 'present'")
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ self.debug('skipping changes due to check mode')
+ else:
+ if self.state == 'present':
+ if not volume_exists:
+ pool_detail = self.get_storage_pool(self.storage_pool_name)
+
+ if not pool_detail:
+ self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name)
+
+ if self.thin_provision and not pool_detail['diskPool']:
+ self.module.fail_json(
+ msg='Thin provisioned volumes can only be located on disk pools (not volume groups)')
+
+ pool_id = pool_detail['id']
+
+ if not self.thin_provision:
+ self.create_volume(pool_id, self.name, self.size_unit, self.size, self.segment_size_kb,
+ self.data_assurance_enabled)
+ msg = "Standard volume [%s] has been created." % (self.name)
+
+ else:
+ self.create_thin_volume(pool_id, self.name, self.size_unit, self.size,
+ self.thin_volume_repo_size, self.thin_volume_max_repo_size,
+ self.data_assurance_enabled)
+ msg = "Thin volume [%s] has been created." % (self.name)
+
+ else: # volume exists but differs, modify...
+ if self.volume_needs_expansion:
+ self.expand_volume()
+ msg = "Volume [%s] has been expanded." % (self.name)
+
+ # this stuff always needs to run on present (since props can't be set on creation)
+ if self.volume_properties_changed:
+ self.update_volume_properties()
+ msg = "Properties of volume [%s] has been updated." % (self.name)
+
+ elif self.state == 'absent':
+ self.delete_volume()
+ msg = "Volume [%s] has been deleted." % (self.name)
+ else:
+ self.debug("exiting with no changes")
+ if self.state == 'absent':
+ msg = "Volume [%s] did not exist." % (self.name)
+ else:
+ msg = "Volume [%s] already exists." % (self.name)
+
+ self.module.exit_json(msg=msg, changed=changed)
+
+
+def main():
+ v = NetAppESeriesVolume()
+
+ try:
+ v.apply()
+ except Exception:
+ e = get_exception()
+ v.debug("Exception in apply(): \n%s" % format_exc(e))
+ v.module.fail_json(msg="Module failed. Error [%s]." % (str(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/storage/netapp/netapp_e_volume_copy.py b/storage/netapp/netapp_e_volume_copy.py
new file mode 100644
index 00000000000..179ee8ff5ad
--- /dev/null
+++ b/storage/netapp/netapp_e_volume_copy.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_volume_copy
+short_description: Create volume copy pairs
+description:
+ - Create and delete snapshots images on volume groups for NetApp E-series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ example:
+ - https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ source_volume_id:
+ description:
+      - The id of the volume copy source.
+ - If used, must be paired with destination_volume_id
+ - Mutually exclusive with volume_copy_pair_id, and search_volume_id
+ destination_volume_id:
+ description:
+      - The id of the volume copy destination.
+ - If used, must be paired with source_volume_id
+ - Mutually exclusive with volume_copy_pair_id, and search_volume_id
+ volume_copy_pair_id:
+ description:
+      - The id of a given volume copy pair
+      - Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
+      - Can be used to delete or check presence of volume pairs
+ - Must specify this or (destination_volume_id and source_volume_id)
+ state:
+ description:
+ - Whether the specified volume copy pair should exist or not.
+ required: True
+ choices: ['present', 'absent']
+ create_copy_pair_if_does_not_exist:
+ description:
+ - Defines if a copy pair will be created if it does not exist.
+ - If set to True destination_volume_id and source_volume_id are required.
+ choices: [True, False]
+ default: True
+ start_stop_copy:
+ description:
+ - starts a re-copy or stops a copy in progress
+ - "Note: If you stop the initial file copy before it it done the copy pair will be destroyed"
+ - Requires volume_copy_pair_id
+ search_volume_id:
+ description:
+ - Searches for all valid potential target and source volumes that could be used in a copy_pair
+ - Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
+"""
+RESULTS = """
+"""
+EXAMPLES = """
+---
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: Json facts for the volume copy that was created.
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: string
+ sample: Created Volume Copy Pair with ID
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError:
+ err = get_exception()
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+        else:
+            raw_data = None
+            data = None
+ except:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
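+# Illustrative use of request(): it returns (http_status, parsed_json) and
+# raises on status >= 400 unless ignore_errors is set, e.g.
+#   (rc, data) = request(url, method='GET', url_username=user, url_password=pwd,
+#                        headers=HEADERS, validate_certs=True)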
+
+def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
+ get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
+ url = params['api_url'] + get_status
+
+ (rc, resp) = request(url, method='GET', url_username=params['api_username'],
+ url_password=params['api_password'], headers=HEADERS,
+ validate_certs=params['validate_certs'])
+
+ volume_copy_pair_id = None
+ for potential_copy_pair in resp:
+        # match both ends of the pair; 'targetVolume' is assumed here to be
+        # the destination volume field in the volume-copy-jobs response
+        if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
+            if potential_copy_pair['targetVolume'] == params['destination_volume_id']:
+                volume_copy_pair_id = potential_copy_pair['id']
+
+ return volume_copy_pair_id
+
+
+def create_copy_pair(params):
+ get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
+ url = params['api_url'] + get_status
+
+ rData = {
+ "sourceId": params['source_volume_id'],
+ "targetId": params['destination_volume_id']
+ }
+
+ (rc, resp) = request(url, data=json.dumps(rData), ignore_errors=True, method='POST',
+ url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+ validate_certs=params['validate_certs'])
+ if rc != 200:
+ return False, (rc, resp)
+ else:
+ return True, (rc, resp)
+
+
+def delete_copy_pair_by_copy_pair_id(params):
+ get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
+ params['ssid'], params['volume_copy_pair_id'])
+ url = params['api_url'] + get_status
+
+ (rc, resp) = request(url, ignore_errors=True, method='DELETE',
+ url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+ validate_certs=params['validate_certs'])
+ if rc != 204:
+ return False, (rc, resp)
+ else:
+ return True, (rc, resp)
+
+
+def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
+    # existence check only, so this must be a GET, not a DELETE
+    get_status = 'storage-systems/%s/volume-copy-jobs/%s' % (
+        params['ssid'], params['volume_copy_pair_id'])
+    url = params['api_url'] + get_status
+
+    (rc, resp) = request(url, ignore_errors=True, method='GET',
+                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+                         validate_certs=params['validate_certs'])
+    if rc != 200:
+        return False, (rc, resp)
+    else:
+        return True, (rc, resp)
+
+
+def start_stop_copy(params):
+ get_status = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % (
+ params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy'])
+ url = params['api_url'] + get_status
+
+ (response_code, response_data) = request(url, ignore_errors=True, method='POST',
+ url_username=params['api_username'], url_password=params['api_password'],
+ headers=HEADERS,
+ validate_certs=params['validate_certs'])
+
+ if response_code == 200:
+ return True, response_data[0]['percentComplete']
+ else:
+ return False, response_data
+
+
+def check_copy_status(params):
+ get_status = 'storage-systems/%s/volume-copy-jobs-control/%s' % (
+ params['ssid'], params['volume_copy_pair_id'])
+ url = params['api_url'] + get_status
+
+ (response_code, response_data) = request(url, ignore_errors=True, method='GET',
+ url_username=params['api_username'], url_password=params['api_password'],
+ headers=HEADERS,
+ validate_certs=params['validate_certs'])
+
+ if response_code == 200:
+ if response_data['percentComplete'] != -1:
+
+ return True, response_data['percentComplete']
+ else:
+ return False, response_data['percentComplete']
+ else:
+ return False, response_data
+
+
+def find_valid_copy_pair_targets_and_sources(params):
+ get_status = 'storage-systems/%s/volumes' % params['ssid']
+ url = params['api_url'] + get_status
+
+ (response_code, response_data) = request(url, ignore_errors=True, method='GET',
+ url_username=params['api_username'], url_password=params['api_password'],
+ headers=HEADERS,
+ validate_certs=params['validate_certs'])
+
+ if response_code == 200:
+ source_capacity = None
+ candidates = []
+ for volume in response_data:
+ if volume['id'] == params['search_volume_id']:
+ source_capacity = volume['capacity']
+ else:
+ candidates.append(volume)
+
+ potential_sources = []
+ potential_targets = []
+
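+        # Candidates larger than the search volume can serve as copy targets;
+        # the rest can only serve as sources. Either way, a candidate must not
+        # already participate in a copy pair.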
+ for volume in candidates:
+ if volume['capacity'] > source_capacity:
+ if volume['volumeCopyTarget'] is False:
+ if volume['volumeCopySource'] is False:
+ potential_targets.append(volume['id'])
+ else:
+ if volume['volumeCopyTarget'] is False:
+ if volume['volumeCopySource'] is False:
+ potential_sources.append(volume['id'])
+
+ return potential_targets, potential_sources
+
+ else:
+ raise Exception("Response [%s]" % response_code)
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ source_volume_id=dict(type='str'),
+ destination_volume_id=dict(type='str'),
+ copy_priority=dict(required=False, default=0, type='int'),
+ ssid=dict(required=True, type='str'),
+ api_url=dict(required=True),
+ api_username=dict(required=False),
+ api_password=dict(required=False, no_log=True),
+ validate_certs=dict(required=False, default=True),
+ targetWriteProtected=dict(required=False, default=True, type='bool'),
+ onlineCopy=dict(required=False, default=False, type='bool'),
+ volume_copy_pair_id=dict(type='str'),
+        state=dict(required=True, choices=['present', 'absent'], type='str'),
+ create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'),
+ start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'),
+ search_volume_id=dict(type='str'),
+ ),
+ mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'],
+ ['volume_copy_pair_id', 'source_volume_id'],
+ ['volume_copy_pair_id', 'search_volume_id'],
+ ['search_volume_id', 'destination_volume_id'],
+ ['search_volume_id', 'source_volume_id'],
+ ],
+ required_together=[['source_volume_id', 'destination_volume_id'],
+ ],
+ required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ],
+ ["start_stop_copy", 'stop', ['volume_copy_pair_id'], ],
+ ["start_stop_copy", 'start', ['volume_copy_pair_id'], ],
+ ]
+
+ )
+ params = module.params
+
+ if not params['api_url'].endswith('/'):
+ params['api_url'] += '/'
+
+ # Check if we want to search
+ if params['search_volume_id'] is not None:
+ try:
+ potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
+ except:
+ e = get_exception()
+ module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % str(e))
+
+ module.exit_json(changed=False,
+ msg=' Valid source devices found: %s Valid target devices found: %s' % (len(potential_sources), len(potential_targets)),
+ search_volume_id=params['search_volume_id'],
+ valid_targets=potential_targets,
+ valid_sources=potential_sources)
+
+ # Check if we want to start or stop a copy operation
+ if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':
+
+ # Get the current status info
+        currently_running, status_info = check_copy_status(params)
+
+ # If we want to start
+ if params['start_stop_copy'] == 'start':
+
+ # If we have already started
+            if currently_running is True:
+                module.exit_json(changed=False, msg='Volume Copy Pair copy is already running.',
+                                 volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
+ # If we need to start
+ else:
+
+ start_status, info = start_stop_copy(params)
+
+ if start_status is True:
+ module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
+ volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
+ else:
+ module.fail_json(msg="Could not start volume copy pair Error: %s" % info)
+
+ # If we want to stop
+ else:
+ # If it has already stopped
+            if currently_running is False:
+ module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
+ volume_copy_pair_id=params['volume_copy_pair_id'])
+
+ # If we need to stop it
+ else:
+ start_status, info = start_stop_copy(params)
+
+ if start_status is True:
+ module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
+ volume_copy_pair_id=params['volume_copy_pair_id'])
+ else:
+ module.fail_json(msg="Could not stop volume copy pair Error: %s" % info)
+
+ # If we want the copy pair to exist we do this stuff
+    if params['state'] == 'present':
+
+ # We need to check if it exists first
+ if params['volume_copy_pair_id'] is None:
+ params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
+ params)
+
+        # If no volume copy pair is found we need to make it.
+ if params['volume_copy_pair_id'] is None:
+
+            # Creation requires source_volume_id and destination_volume_id;
+            # it cannot be done with just a volume_copy_pair_id
+
+ copy_began_status, (rc, resp) = create_copy_pair(params)
+
+ if copy_began_status is True:
+ module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
+ else:
+ module.fail_json(msg="Could not create volume copy pair Code: %s Error: %s" % (rc, resp))
+
+ # If it does exist we do nothing
+ else:
+ # We verify that it exists
+ exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
+ params)
+
+ if exist_status:
+ module.exit_json(changed=False,
+ msg=' Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
+ else:
+ if exist_status_code == 404:
+ module.fail_json(
+ msg=' Volume Copy Pair with ID: %s does not exist. Can not create without source_volume_id and destination_volume_id' %
+ params['volume_copy_pair_id'])
+ else:
+ module.fail_json(msg="Could not find volume copy pair Code: %s Error: %s" % (
+ exist_status_code, exist_status_data))
+
+ # If we want it to not exist we do this
+ else:
+
+ if params['volume_copy_pair_id'] is None:
+ params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
+ params)
+
+ # We delete it by the volume_copy_pair_id
+ delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)
+
+ if delete_status is True:
+ module.exit_json(changed=True,
+ msg=' Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
+ else:
+ if delete_status_code == 404:
+ module.exit_json(changed=False,
+ msg=' Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
+ else:
+ module.fail_json(msg="Could not delete volume copy pair Code: %s Error: %s" % (
+ delete_status_code, delete_status_data))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/system/alternatives.py b/system/alternatives.py
index 90e2237f86c..833ef27aaa5 100644
--- a/system/alternatives.py
+++ b/system/alternatives.py
@@ -22,6 +22,10 @@
along with Ansible. If not, see .
"""
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: alternatives
@@ -47,28 +51,48 @@
- The path to the symbolic link that should point to the real executable.
- This option is required on RHEL-based distributions
required: false
+ priority:
+ description:
+ - The priority of the alternative
+ required: false
+ default: 50
+ version_added: "2.2"
requirements: [ update-alternatives ]
'''
EXAMPLES = '''
- name: correct java version selected
- alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+ alternatives:
+ name: java
+ path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
- name: alternatives link created
- alternatives: name=hadoop-conf link=/etc/hadoop/conf path=/etc/hadoop/conf.ansible
+ alternatives:
+ name: hadoop-conf
+ link: /etc/hadoop/conf
+ path: /etc/hadoop/conf.ansible
+
+- name: make java 32 bit an alternative with low priority
+ alternatives:
+ name: java
+ path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java
+ priority: -10
'''
-DEFAULT_LINK_PRIORITY = 50
-
import re
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
- path = dict(required=True),
- link = dict(required=False),
+ path = dict(required=True, type='path'),
+ link = dict(required=False, type='path'),
+ priority = dict(required=False, type='int',
+ default=50),
),
supports_check_mode=True,
)
@@ -77,6 +101,7 @@ def main():
name = params['name']
path = params['path']
link = params['link']
+ priority = params['priority']
UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True)
@@ -124,7 +149,7 @@ def main():
module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
module.run_command(
- [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)],
+ [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)],
check_rc=True
)
@@ -135,12 +160,11 @@ def main():
)
module.exit_json(changed=True)
- except subprocess.CalledProcessError, cpe:
+ except subprocess.CalledProcessError:
+ e = get_exception()
-        module.fail_json(msg=str(dir(cpe)))
+        module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
-
-# import module snippets
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/at.py b/system/at.py
index 0ce9ff2c7d4..2c01c5d3195 100644
--- a/system/at.py
+++ b/system/at.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: at
@@ -64,13 +68,22 @@
EXAMPLES = '''
# Schedule a command to execute in 20 minutes as root.
-- at: command="ls -d / > /dev/null" count=20 units="minutes"
+- at:
+ command: "ls -d / > /dev/null"
+ count: 20
+ units: minutes
# Match a command to an existing job and delete the job.
-- at: command="ls -d / > /dev/null" state="absent"
+- at:
+ command: "ls -d / > /dev/null"
+ state: absent
# Schedule a command to execute in 20 minutes making sure it is unique in the queue.
-- at: command="ls -d / > /dev/null" unique=true count=20 units="minutes"
+- at:
+ command: "ls -d / > /dev/null"
+ unique: true
+ count: 20
+ units: minutes
'''
import os
@@ -197,4 +210,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/system/capabilities.py b/system/capabilities.py
index ce8ffcfa632..27f3c7519cc 100644
--- a/system/capabilities.py
+++ b/system/capabilities.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: capabilities
@@ -55,10 +59,16 @@
EXAMPLES = '''
# Set cap_sys_chroot+ep on /foo
-- capabilities: path=/foo capability=cap_sys_chroot+ep state=present
+- capabilities:
+ path: /foo
+ capability: cap_sys_chroot+ep
+ state: present
# Remove cap_net_bind_service from /bar
-- capabilities: path=/bar capability=cap_net_bind_service state=absent
+- capabilities:
+ path: /bar
+ capability: cap_net_bind_service
+ state: absent
'''
@@ -180,8 +190,9 @@ def main():
CapabilitiesModule(module)
- sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/system/cronvar.py b/system/cronvar.py
index b3b373e9dc3..a65610811b7 100644
--- a/system/cronvar.py
+++ b/system/cronvar.py
@@ -26,6 +26,10 @@
# This module is based on the crontab module.
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: cronvar
@@ -70,7 +74,9 @@
default: root
cron_file:
description:
- - If specified, uses this file in cron.d instead of an individual user's crontab.
+ - If specified, uses this file instead of an individual user's crontab.
+ Without a leading /, this is assumed to be in /etc/cron.d. With a leading
+ /, this is taken as absolute.
required: false
default: null
backup:
@@ -87,15 +93,22 @@
EXAMPLES = '''
# Ensure a variable exists.
# Creates an entry like "EMAIL=doug@ansibmod.con.com"
-- cronvar: name="EMAIL" value="doug@ansibmod.con.com"
+- cronvar:
+ name: EMAIL
+ value: doug@ansibmod.con.com
# Make sure a variable is gone. This will remove any variable named
# "LEGACY"
-- cronvar: name="LEGACY" state=absent
+- cronvar:
+ name: LEGACY
+ state: absent
# Adds a variable to a file under /etc/cron.d
-- cronvar: name="LOGFILE" value="/var/log/yum-autoupdate.log"
- user="root" cron_file=ansible_yum-autoupdate
+- cronvar:
+ name: LOGFILE
+ value: /var/log/yum-autoupdate.log
+ user: root
+ cron_file: ansible_yum-autoupdate
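+
+# A sketch of the absolute-path form described above: with a leading /,
+# cron_file is used verbatim instead of being placed under /etc/cron.d
+- cronvar:
+    name: LOGFILE
+    value: /var/log/yum-autoupdate.log
+    user: root
+    cron_file: /etc/cron.d/ansible_yum-autoupdate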
'''
import os
@@ -104,6 +117,8 @@
import platform
import pipes
import shlex
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
CRONCMD = "/usr/bin/crontab"
@@ -126,7 +141,11 @@ def __init__(self, module, user=None, cron_file=None):
self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"', ))
if cron_file:
- self.cron_file = '/etc/cron.d/%s' % cron_file
+ self.cron_file = ""
+ if os.path.isabs(cron_file):
+ self.cron_file = cron_file
+ else:
+ self.cron_file = os.path.join('/etc/cron.d', cron_file)
else:
self.cron_file = None
@@ -141,7 +160,8 @@ def read(self):
f = open(self.cron_file, 'r')
self.lines = f.read().splitlines()
f.close()
- except IOError, e:
+ except IOError:
+ e = get_exception()
# cron file does not exist
return
except:
@@ -197,7 +217,8 @@ def remove_variable_file(self):
try:
os.unlink(self.cron_file)
return True
- except OSError, e:
+ except OSError:
+ e = get_exception()
# cron file does not exist
return False
except:
@@ -357,7 +378,7 @@ def main():
res_args = dict()
# Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
- os.umask(022)
+ os.umask(int('022',8))
cronvar = CronVar(module, user, cron_file)
module.debug('cronvar instantiated - name: "%s"' % name)
@@ -419,7 +440,6 @@ def main():
# --- should never get here
module.exit_json(msg="Unable to execute cronvar task.")
-# import module snippets
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/crypttab.py b/system/crypttab.py
index 44d9f859791..f957a51293a 100644
--- a/system/crypttab.py
+++ b/system/crypttab.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: crypttab
@@ -29,7 +33,7 @@
name:
description:
- Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
- optionaly prefixed with C(/dev/mapper), as it appears in the filesystem. I(/dev/mapper)
+        optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
will be stripped from I(name).
required: true
default: null
@@ -52,7 +56,7 @@
default: null
password:
description:
- - Encryption password, the path to a file containing the pasword, or
+ - Encryption password, the path to a file containing the password, or
'none' or '-' if the password should be entered at boot.
required: false
default: "none"
@@ -73,15 +77,26 @@
'''
EXAMPLES = '''
-- name: Set the options explicitly a deivce which must already exist
- crypttab: name=luks-home state=present opts=discard,cipher=aes-cbc-essiv:sha256
+
+# Since a colon is a special character in YAML, if your string contains a colon it's better to quote the string
+- name: Set the options explicitly for a device which must already exist
+ crypttab:
+ name: luks-home
+ state: present
+ opts: 'discard,cipher=aes-cbc-essiv:sha256'
- name: Add the 'discard' option to any existing options for all devices
- crypttab: name={{ item.device }} state=opts_present opts=discard
- with_items: ansible_mounts
+ crypttab:
+ name: '{{ item.device }}'
+ state: opts_present
+ opts: discard
+ with_items: '{{ ansible_mounts }}'
-  when: '/dev/mapper/luks-' in {{ item.device }}
+  when: "'/dev/mapper/luks-' in item.device"
'''
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
def main():
module = AnsibleModule(
@@ -89,19 +104,22 @@ def main():
name = dict(required=True),
state = dict(required=True, choices=['present', 'absent', 'opts_present', 'opts_absent']),
backing_device = dict(default=None),
- password = dict(default=None),
+ password = dict(default=None, type='path'),
opts = dict(default=None),
- path = dict(default='/etc/crypttab')
+ path = dict(default='/etc/crypttab', type='path')
),
supports_check_mode = True
)
- name = module.params['name'].lstrip('/dev/mapper')
backing_device = module.params['backing_device']
password = module.params['password']
opts = module.params['opts']
state = module.params['state']
path = module.params['path']
+ name = module.params['name']
+ if name.startswith('/dev/mapper/'):
+ name = name[len('/dev/mapper/'):]
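+    # e.g. '/dev/mapper/luks-home' -> 'luks-home'; a plain lstrip('/dev/mapper')
+    # would also strip matching characters from the start of the name itself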
+
if state != 'absent' and backing_device is None and password is None and opts is None:
module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
@@ -123,7 +141,8 @@ def main():
try:
crypttab = Crypttab(path)
existing_line = crypttab.match(name)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg="failed to open and parse crypttab file: %s" % e,
**module.params)
@@ -202,6 +221,8 @@ def __str__(self):
for line in self._lines:
lines.append(str(line))
crypttab = '\n'.join(lines)
+ if len(crypttab) == 0:
+ crypttab += '\n'
if crypttab[-1] != '\n':
crypttab += '\n'
return crypttab
@@ -249,18 +270,18 @@ def _line_valid(self, line):
def _split_line(self, line):
fields = line.split()
try:
- field2 = field[2]
+ field2 = fields[2]
except IndexError:
field2 = None
try:
- field3 = field[3]
+ field3 = fields[3]
except IndexError:
field3 = None
return (fields[0],
fields[1],
field2,
- fields3)
+ field3)
def remove(self):
self.line, self.name, self.backing_device = '', None, None
@@ -303,7 +324,7 @@ def __init__(self, opts_string):
def add(self, opts_string):
changed = False
for k, v in Options(opts_string).items():
- if self.has_key(k):
+ if k in self:
if self[k] != v:
changed = True
else:
@@ -314,7 +335,7 @@ def add(self, opts_string):
def remove(self, opts_string):
changed = False
for k in Options(opts_string):
- if self.has_key(k):
+ if k in self:
del self[k]
changed = True
return changed, 'removed options'
@@ -332,7 +353,7 @@ def __iter__(self):
return iter(self.itemlist)
def __setitem__(self, key, value):
- if not self.has_key(key):
+ if key not in self:
self.itemlist.append(key)
super(Options, self).__setitem__(key, value)
@@ -353,6 +374,5 @@ def __str__(self):
ret.append('%s=%s' % (k, v))
return ','.join(ret)
-# import module snippets
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/debconf.py b/system/debconf.py
index b249986a947..224f2fbcb9b 100644
--- a/system/debconf.py
+++ b/system/debconf.py
@@ -21,6 +21,10 @@
along with Ansible. If not, see .
"""
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: debconf
@@ -51,11 +55,11 @@
aliases: ['setting', 'selection']
vtype:
description:
- - The type of the value supplied
+ - The type of the value supplied.
+ - C(seen) was added in 2.2.
required: false
default: null
- choices: [string, password, boolean, select, multiselect, note, error, title, text]
- aliases: []
+ choices: [string, password, boolean, select, multiselect, note, error, title, text, seen]
value:
description:
- Value to set the configuration to
@@ -67,23 +71,35 @@
- Do not set 'seen' flag when pre-seeding
required: false
default: False
- aliases: []
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# Set default locale to fr_FR.UTF-8
-debconf: name=locales question='locales/default_environment_locale' value=fr_FR.UTF-8 vtype='select'
+- debconf:
+ name: locales
+ question: locales/default_environment_locale
+ value: fr_FR.UTF-8
+ vtype: select
# set to generate locales:
-debconf: name=locales question='locales/locales_to_be_generated' value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8' vtype='multiselect'
+- debconf:
+ name: locales
+ question: locales/locales_to_be_generated
+ value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
+ vtype: multiselect
# Accept oracle license
-debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select'
+- debconf:
+ name: oracle-java7-installer
+ question: shared/accepted-oracle-license-v1-1
+ value: true
+ vtype: select
# Specifying package you can register/return the list of questions and current values
-debconf: name='tzdata'
+- debconf:
+ name: tzdata
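+
+# A sketch of the C(seen) vtype added in 2.2; the value is assumed to be the
+# seen flag itself
+- debconf:
+    name: tzdata
+    question: tzdata/Areas
+    value: 'true'
+    vtype: seen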
'''
def get_selections(module, pkg):
@@ -109,6 +125,11 @@ def set_selection(module, pkg, question, vtype, value, unseen):
if unseen:
cmd.append('-u')
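+    # debconf expects lowercase 'true'/'false'; Ansible may pass the value in
+    # as the Python-style strings 'True'/'False', so normalize it here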
+ if vtype == 'boolean':
+ if value == 'True':
+ value = 'true'
+ elif value == 'False':
+ value = 'false'
data = ' '.join([pkg, question, vtype, value])
return module.run_command(cmd, data=data)
@@ -119,8 +140,8 @@ def main():
argument_spec = dict(
name = dict(required=True, aliases=['pkg'], type='str'),
question = dict(required=False, aliases=['setting', 'selection'], type='str'),
- vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text']),
- value= dict(required=False, type='str'),
+ vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text', 'seen']),
+ value = dict(required=False, type='str', aliases=['answer']),
unseen = dict(required=False, type='bool'),
),
required_together = ( ['question','vtype', 'value'],),
@@ -157,12 +178,19 @@ def main():
prev = {question: prev[question]}
else:
prev[question] = ''
+ if module._diff:
+ after = prev.copy()
+ after.update(curr)
+ diff_dict = {'before': prev, 'after': after}
+ else:
+ diff_dict = {}
- module.exit_json(changed=changed, msg=msg, current=curr, previous=prev)
+ module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)
module.exit_json(changed=changed, msg=msg, current=prev)
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/facter.py b/system/facter.py
index 6c09877fcbe..5ae13ab7371 100644
--- a/system/facter.py
+++ b/system/facter.py
@@ -20,6 +20,10 @@
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: facter
@@ -47,12 +51,15 @@ def main():
argument_spec = dict()
)
- cmd = ["/usr/bin/env", "facter", "--puppet", "--json"]
+ facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
+
+ cmd = [facter_path, "--puppet", "--json"]
+
rc, out, err = module.run_command(cmd, check_rc=True)
module.exit_json(**json.loads(out))
# import module snippets
from ansible.module_utils.basic import *
-main()
-
+if __name__ == '__main__':
+ main()
diff --git a/system/filesystem.py b/system/filesystem.py
index b44168a0e06..d49360f09bc 100644
--- a/system/filesystem.py
+++ b/system/filesystem.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
author: "Alexander Bulimov (@abulimov)"
@@ -30,6 +34,7 @@
fstype:
description:
- File System type to be created.
+ - reiserfs support was added in 2.2.
required: true
dev:
description:
@@ -57,12 +62,61 @@
EXAMPLES = '''
# Create a ext2 filesystem on /dev/sdb1.
-- filesystem: fstype=ext2 dev=/dev/sdb1
+- filesystem:
+ fstype: ext2
+ dev: /dev/sdb1
# Create a ext4 filesystem on /dev/sdb1 and check disk blocks.
-- filesystem: fstype=ext4 dev=/dev/sdb1 opts="-cc"
+- filesystem:
+ fstype: ext4
+ dev: /dev/sdb1
+ opts: -cc
'''
+def _get_dev_size(dev, module):
+ """ Return size in bytes of device. Returns int """
+ blockdev_cmd = module.get_bin_path("blockdev", required=True)
+ rc, devsize_in_bytes, err = module.run_command("%s %s %s" % (blockdev_cmd, "--getsize64", dev))
+ return int(devsize_in_bytes)
+
+
+def _get_fs_size(fssize_cmd, dev, module):
+ """ Return size in bytes of filesystem on device. Returns int """
+ cmd = module.get_bin_path(fssize_cmd, required=True)
+ if 'tune2fs' == fssize_cmd:
+ # Get Block count and Block size
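+        # Illustrative 'tune2fs -l' output lines the loop below matches on:
+        #   Block count:              26214400
+        #   Block size:               4096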
+ rc, size, err = module.run_command("%s %s %s" % (cmd, '-l', dev))
+ if rc == 0:
+ for line in size.splitlines():
+ if 'Block count:' in line:
+ block_count = int(line.split(':')[1].strip())
+ elif 'Block size:' in line:
+ block_size = int(line.split(':')[1].strip())
+ break
+ else:
+ module.fail_json(msg="Failed to get block count and block size of %s with %s" % (dev, cmd), rc=rc, err=err )
+ elif 'xfs_info' == fssize_cmd:
+ # Get Block count and Block size
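+        # Illustrative 'xfs_info' data line the loop below parses:
+        #   data     =        bsize=4096   blocks=6553600, imaxpct=25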
+ rc, size, err = module.run_command("%s %s" % (cmd, dev))
+ if rc == 0:
+ for line in size.splitlines():
+                if 'data ' in line:
+ block_size = int(line.split('=')[2].split()[0])
+ block_count = int(line.split('=')[3].split(',')[0])
+ break
+ else:
+ module.fail_json(msg="Failed to get block count and block size of %s with %s" % (dev, cmd), rc=rc, err=err )
+ elif 'btrfs' == fssize_cmd:
+ #ToDo
+ # There is no way to get the blocksize and blockcount for btrfs filesystems
+ block_size = 1
+ block_count = 1
+
+    return block_size * block_count
+
+
def main():
module = AnsibleModule(
argument_spec = dict(
@@ -82,36 +136,49 @@ def main():
'grow' : 'resize2fs',
'grow_flag' : None,
'force_flag' : '-F',
+ 'fsinfo': 'tune2fs',
},
'ext3' : {
'mkfs' : 'mkfs.ext3',
'grow' : 'resize2fs',
'grow_flag' : None,
'force_flag' : '-F',
+ 'fsinfo': 'tune2fs',
},
'ext4' : {
'mkfs' : 'mkfs.ext4',
'grow' : 'resize2fs',
'grow_flag' : None,
'force_flag' : '-F',
+ 'fsinfo': 'tune2fs',
+ },
+ 'reiserfs' : {
+ 'mkfs' : 'mkfs.reiserfs',
+ 'grow' : 'resize_reiserfs',
+ 'grow_flag' : None,
+ 'force_flag' : '-f',
+ 'fsinfo': 'reiserfstune',
},
'ext4dev' : {
'mkfs' : 'mkfs.ext4',
'grow' : 'resize2fs',
'grow_flag' : None,
'force_flag' : '-F',
+ 'fsinfo': 'tune2fs',
},
'xfs' : {
'mkfs' : 'mkfs.xfs',
'grow' : 'xfs_growfs',
'grow_flag' : None,
'force_flag' : '-f',
+ 'fsinfo': 'xfs_info',
},
'btrfs' : {
'mkfs' : 'mkfs.btrfs',
'grow' : 'btrfs',
'grow_flag' : 'filesystem resize',
'force_flag' : '-f',
+ 'fsinfo': 'btrfs',
}
}
@@ -131,6 +198,7 @@ def main():
mkfscmd = fs_cmd_map[fstype]['mkfs']
force_flag = fs_cmd_map[fstype]['force_flag']
growcmd = fs_cmd_map[fstype]['grow']
+ fssize_cmd = fs_cmd_map[fstype]['fsinfo']
if not os.path.exists(dev):
module.fail_json(msg="Device %s not found."%dev)
@@ -140,13 +208,24 @@ def main():
rc,raw_fs,err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
fs = raw_fs.strip()
- if fs == fstype and resizefs == False:
+ if fs == fstype and resizefs == False and not force:
module.exit_json(changed=False)
elif fs == fstype and resizefs == True:
- cmd = module.get_bin_path(growcmd, required=True)
- if module.check_mode:
- module.exit_json(changed=True, msg="May resize filesystem")
+ # Get dev and fs size and compare
+ devsize_in_bytes = _get_dev_size(dev, module)
+ fssize_in_bytes = _get_fs_size(fssize_cmd, dev, module)
+ if fssize_in_bytes < devsize_in_bytes:
+ fs_smaller = True
else:
+ fs_smaller = False
+
+ if module.check_mode and fs_smaller:
+ module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (fstype,dev))
+ elif module.check_mode and not fs_smaller:
+ module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (fstype, dev))
+ elif fs_smaller:
+ cmd = module.get_bin_path(growcmd, required=True)
rc,out,err = module.run_command("%s %s" % (cmd, dev))
# Sadly there is no easy way to determine if this has changed. For now, just say "true" and move on.
# in the future, you would have to parse the output to determine this.
@@ -155,6 +234,8 @@ def main():
module.exit_json(changed=True, msg=out)
else:
module.fail_json(msg="Resizing filesystem %s on device '%s' failed"%(fstype,dev), rc=rc, err=err)
+ else:
+ module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (fstype, dev))
elif fs and not force:
module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite"%(dev,fs), rc=rc, err=err)
@@ -180,4 +261,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/firewalld.py b/system/firewalld.py
index 47d98544000..8324069b1b3 100644
--- a/system/firewalld.py
+++ b/system/firewalld.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: firewalld
@@ -28,7 +32,7 @@
options:
service:
description:
- - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services."
+ - "Name of a service to add/remove to/from firewalld - service must be listed in output of firewall-cmd --get-services."
required: false
default: null
port:
@@ -47,6 +51,12 @@
required: false
default: null
version_added: "2.0"
+ interface:
+ description:
+ - 'The interface you would like to add/remove to/from a zone in firewalld'
+ required: false
+ default: null
+ version_added: "2.1"
zone:
description:
- 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).'
@@ -55,8 +65,9 @@
choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block" ]
permanent:
description:
- - "Should this configuration be in the running firewalld configuration or persist across reboots."
- required: true
+ - "Should this configuration be in the running firewalld configuration or persist across reboots. As of Ansible version 2.3, permanent operations can operate on firewalld configs when it's not running (requires firewalld >= 3.0.9)"
+ required: false
+ default: null
immediate:
description:
- "Should this configuration be applied immediately, if set as permanent"
@@ -73,39 +84,167 @@
- "The amount of time the rule should be in effect for when non-permanent."
required: false
default: 0
+ masquerade:
+ description:
+ - 'The masquerade setting you would like to enable/disable to/from zones within firewalld'
+ required: false
+ default: null
+ version_added: "2.1"
notes:
- Not tested on any Debian based system.
+ - Requires the python2 bindings of firewalld, which may not be installed by default if the distribution switched to python 3
requirements: [ 'firewalld >= 0.2.11' ]
author: "Adam Miller (@maxamillion)"
'''
EXAMPLES = '''
-- firewalld: service=https permanent=true state=enabled
-- firewalld: port=8081/tcp permanent=true state=disabled
-- firewalld: port=161-162/udp permanent=true state=enabled
-- firewalld: zone=dmz service=http permanent=true state=enabled
-- firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled
-- firewalld: source='192.168.1.0/24' zone=internal state=enabled
+- firewalld:
+ service: https
+ permanent: true
+ state: enabled
+
+- firewalld:
+ port: 8081/tcp
+ permanent: true
+ state: disabled
+
+- firewalld:
+ port: 161-162/udp
+ permanent: true
+ state: enabled
+
+- firewalld:
+ zone: dmz
+ service: http
+ permanent: true
+ state: enabled
+
+- firewalld:
+ rich_rule: 'rule service name="ftp" audit limit value="1/m" accept'
+ permanent: true
+ state: enabled
+
+- firewalld:
+ source: 192.0.2.0/24
+ zone: internal
+ state: enabled
+
+- firewalld:
+ zone: trusted
+ interface: eth2
+ permanent: true
+ state: enabled
+
+- firewalld:
+ masquerade: yes
+ state: enabled
+ permanent: true
+ zone: dmz
'''
-import os
-import re
+from ansible.module_utils.basic import AnsibleModule
+
+import sys
+
+#####################
+# Globals
+#
+fw = None
+module = None
+fw_offline = False
+Rich_Rule = None
+FirewallClientZoneSettings = None
+
+#####################
+# exception handling
+#
+def action_handler(action_func, action_func_args):
+ """
+ Function to wrap calls to make actions on firewalld in try/except
+ logic and emit (hopefully) useful error messages
+ """
+
+ msgs = []
+
+ try:
+ return action_func(*action_func_args)
+ except Exception:
+ # Make python 2.4 shippable ci tests happy
+ e = sys.exc_info()[1]
+
+        # For commonly known errors, provide extra context to help the user
+        # diagnose what actually went wrong
+ if "INVALID_SERVICE" in "%s" % e:
+ msgs.append("Services are defined by port/tcp relationship and named as they are in /etc/services (on most systems)")
+
+ if len(msgs) > 0:
+ module.fail_json(
+ msg='ERROR: Exception caught: %s %s' % (e, ', '.join(msgs))
+ )
+ else:
+ module.fail_json(msg='ERROR: Exception caught: %s' % e)
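+
+# Illustrative call, using the port helpers defined below:
+#   action_handler(set_port_enabled_permanent, (zone, port, protocol))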
+
+#####################
+# fw_offline helpers
+#
+def get_fw_zone_settings(zone):
+ if fw_offline:
+ fw_zone = fw.config.get_zone(zone)
+ fw_settings = FirewallClientZoneSettings(
+ list(fw.config.get_zone_config(fw_zone))
+ )
+ else:
+ fw_zone = fw.config().getZoneByName(zone)
+ fw_settings = fw_zone.getSettings()
+
+ return (fw_zone, fw_settings)
+
+def update_fw_settings(fw_zone, fw_settings):
+ if fw_offline:
+ fw.config.set_zone_config(fw_zone, fw_settings.settings)
+ else:
+ fw_zone.update(fw_settings)
+
+#####################
+# masquerade handling
+#
+def get_masquerade_enabled(zone):
+ if fw.queryMasquerade(zone) == True:
+ return True
+ else:
+ return False
-try:
- import firewall.config
- FW_VERSION = firewall.config.VERSION
+def get_masquerade_enabled_permanent(zone):
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
+ if fw_settings.getMasquerade() == True:
+ return True
+ else:
+ return False
- from firewall.client import FirewallClient
- fw = FirewallClient()
- HAS_FIREWALLD = True
-except ImportError:
- HAS_FIREWALLD = False
+def set_masquerade_enabled(zone):
+ fw.addMasquerade(zone)
+
+def set_masquerade_disabled(zone):
+ fw.removeMasquerade(zone)
+
+def set_masquerade_permanent(zone, masquerade):
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
+ fw_settings.setMasquerade(masquerade)
+ update_fw_settings(fw_zone, fw_settings)
################
# port handling
#
def get_port_enabled(zone, port_proto):
- if port_proto in fw.getPorts(zone):
+ if fw_offline:
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
+ ports_list = fw_settings.getPorts()
+ else:
+ ports_list = fw.getPorts(zone)
+
+ if port_proto in ports_list:
return True
else:
return False
@@ -117,47 +256,113 @@ def set_port_disabled(zone, port, protocol):
fw.removePort(zone, port, protocol)
def get_port_enabled_permanent(zone, port_proto):
- fw_zone = fw.config().getZoneByName(zone)
- fw_settings = fw_zone.getSettings()
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
+
if tuple(port_proto) in fw_settings.getPorts():
return True
else:
return False
def set_port_enabled_permanent(zone, port, protocol):
- fw_zone = fw.config().getZoneByName(zone)
- fw_settings = fw_zone.getSettings()
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
fw_settings.addPort(port, protocol)
- fw_zone.update(fw_settings)
+ update_fw_settings(fw_zone, fw_settings)
def set_port_disabled_permanent(zone, port, protocol):
- fw_zone = fw.config().getZoneByName(zone)
- fw_settings = fw_zone.getSettings()
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
fw_settings.removePort(port, protocol)
- fw_zone.update(fw_settings)
+ update_fw_settings(fw_zone, fw_settings)
####################
# source handling
#
def get_source(zone, source):
- fw_zone = fw.config().getZoneByName(zone)
- fw_settings = fw_zone.getSettings()
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
if source in fw_settings.getSources():
return True
else:
return False
def add_source(zone, source):
- fw_zone = fw.config().getZoneByName(zone)
- fw_settings = fw_zone.getSettings()
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
fw_settings.addSource(source)
- fw_zone.update(fw_settings)
+ update_fw_settings(fw_zone, fw_settings)
def remove_source(zone, source):
- fw_zone = fw.config().getZoneByName(zone)
- fw_settings = fw_zone.getSettings()
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
fw_settings.removeSource(source)
- fw_zone.update(fw_settings)
+ update_fw_settings(fw_zone, fw_settings)
+
+####################
+# interface handling
+#
+def get_interface(zone, interface):
+ if fw_offline:
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
+ interface_list = fw_settings.getInterfaces()
+ else:
+ interface_list = fw.getInterfaces(zone)
+    if interface in interface_list:
+ return True
+ else:
+ return False
+
+def change_zone_of_interface(zone, interface):
+ fw.changeZoneOfInterface(zone, interface)
+
+def remove_interface(zone, interface):
+ fw.removeInterface(zone, interface)
+
+def get_interface_permanent(zone, interface):
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
+
+ if interface in fw_settings.getInterfaces():
+ return True
+ else:
+ return False
+
+def change_zone_of_interface_permanent(zone, interface):
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
+ if fw_offline:
+        iface_zone_objs = []
+        # don't shadow the zone parameter; it is needed below to compare the
+        # interface's current zone against the requested one
+        for zone_name in fw.config.get_zones():
+            old_zone_obj = fw.config.get_zone(zone_name)
+            if interface in old_zone_obj.interfaces:
+                iface_zone_objs.append(old_zone_obj)
+ if len(iface_zone_objs) > 1:
+            # Even though it shouldn't happen, it's actually possible that
+            # the same interface is listed in several zone XML files
+            module.fail_json(
+                msg='ERROR: interface {} is in {} zone XML files, can only be in one'.format(
+ interface,
+ len(iface_zone_objs)
+ )
+ )
+ old_zone_obj = iface_zone_objs[0]
+ if old_zone_obj.name != zone:
+ old_zone_settings = FirewallClientZoneSettings(
+ fw.config.get_zone_config(old_zone_obj)
+ )
+ old_zone_settings.removeInterface(interface) # remove from old
+ fw.config.set_zone_config(old_zone_obj, old_zone_settings.settings)
+
+ fw_settings.addInterface(interface) # add to new
+ fw.config.set_zone_config(fw_zone, fw_settings.settings)
+ else:
+ old_zone_name = fw.config().getZoneOfInterface(interface)
+ if old_zone_name != zone:
+ if old_zone_name:
+ old_zone_obj = fw.config().getZoneByName(old_zone_name)
+ old_zone_settings = old_zone_obj.getSettings()
+ old_zone_settings.removeInterface(interface) # remove from old
+ old_zone_obj.update(old_zone_settings)
+ fw_settings.addInterface(interface) # add to new
+ fw_zone.update(fw_settings)
+
+def remove_interface_permanent(zone, interface):
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
+ fw_settings.removeInterface(interface)
+ update_fw_settings(fw_zone, fw_settings)
####################
# service handling
@@ -175,30 +380,30 @@ def set_service_disabled(zone, service):
fw.removeService(zone, service)
def get_service_enabled_permanent(zone, service):
- fw_zone = fw.config().getZoneByName(zone)
- fw_settings = fw_zone.getSettings()
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
+
if service in fw_settings.getServices():
return True
else:
return False
def set_service_enabled_permanent(zone, service):
- fw_zone = fw.config().getZoneByName(zone)
- fw_settings = fw_zone.getSettings()
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
fw_settings.addService(service)
- fw_zone.update(fw_settings)
+ update_fw_settings(fw_zone, fw_settings)
def set_service_disabled_permanent(zone, service):
- fw_zone = fw.config().getZoneByName(zone)
- fw_settings = fw_zone.getSettings()
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
fw_settings.removeService(service)
- fw_zone.update(fw_settings)
-
+ update_fw_settings(fw_zone, fw_settings)
####################
# rich rule handling
#
def get_rich_rule_enabled(zone, rule):
+ # Convert the rule string to standard format
+ # before checking whether it is present
+ rule = str(Rich_Rule(rule_str=rule))
if rule in fw.getRichRules(zone):
return True
else:
@@ -211,28 +416,31 @@ def set_rich_rule_disabled(zone, rule):
fw.removeRichRule(zone, rule)
def get_rich_rule_enabled_permanent(zone, rule):
- fw_zone = fw.config().getZoneByName(zone)
- fw_settings = fw_zone.getSettings()
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
+ # Convert the rule string to standard format
+ # before checking whether it is present
+ rule = str(Rich_Rule(rule_str=rule))
if rule in fw_settings.getRichRules():
return True
else:
return False
def set_rich_rule_enabled_permanent(zone, rule):
- fw_zone = fw.config().getZoneByName(zone)
- fw_settings = fw_zone.getSettings()
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
fw_settings.addRichRule(rule)
- fw_zone.update(fw_settings)
+ update_fw_settings(fw_zone, fw_settings)
def set_rich_rule_disabled_permanent(zone, rule):
- fw_zone = fw.config().getZoneByName(zone)
- fw_settings = fw_zone.getSettings()
+ fw_zone, fw_settings = get_fw_zone_settings(zone)
fw_settings.removeRichRule(rule)
- fw_zone.update(fw_settings)
-
+ update_fw_settings(fw_zone, fw_settings)
def main():
+ ## make module global so we don't have to pass it to action_handler on
+ ## every function call
+ global module
module = AnsibleModule(
argument_spec = dict(
service=dict(required=False,default=None),
@@ -244,18 +452,77 @@ def main():
permanent=dict(type='bool',required=False,default=None),
state=dict(choices=['enabled', 'disabled'], required=True),
timeout=dict(type='int',required=False,default=0),
+ interface=dict(required=False,default=None),
+ masquerade=dict(required=False,default=None),
+ offline=dict(type='bool',required=False,default=None),
),
supports_check_mode=True
)
+
+ ## Handle running (online) daemon vs non-running (offline) daemon
+ global fw
+ global fw_offline
+ global Rich_Rule
+ global FirewallClientZoneSettings
+
+ ## Imports
+ try:
+ import firewall.config
+ FW_VERSION = firewall.config.VERSION
+
+ from firewall.client import Rich_Rule
+ from firewall.client import FirewallClient
+ fw = None
+ fw_offline = False
+
+ try:
+ fw = FirewallClient()
+ fw.getDefaultZone()
+ except AttributeError:
+ ## Firewalld is not currently running, permanent-only operations
+
+ ## Import other required parts of the firewalld API
+ ##
+ ## NOTE:
+ ## online and offline operations do not share a common firewalld API
+ from firewall.core.fw_test import Firewall_test
+ from firewall.client import FirewallClientZoneSettings
+ fw = Firewall_test()
+ fw.start()
+ fw_offline = True
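+ ## Firewall_test manipulates the on-disk XML configuration directly,
+ ## without a daemon connection, so only permanent operations can succeed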
+
+ except ImportError:
+ ## Make python 2.4 shippable ci tests happy
+ e = sys.exc_info()[1]
+ module.fail_json(msg='firewalld and its python 2 module are required for this module, version 0.2.11 or newer required (0.3.9 or newer for offline operations) \n %s' % e)
+
+ if fw_offline:
+ ## Pre-run version checking
+ if FW_VERSION < "0.3.9":
+ module.fail_json(msg='unsupported version of firewalld, offline operations require >= 0.3.9')
+ else:
+ ## Pre-run version checking
+ if FW_VERSION < "0.2.11":
+ module.fail_json(msg='unsupported version of firewalld, requires >= 0.2.11')
+
+ ## Check for firewalld running
+ try:
+ if fw.connected == False:
+ module.fail_json(msg='firewalld service must be running, or try with offline=true')
+ except AttributeError:
+ module.fail_json(msg="firewalld connection can't be established,\
+ installed version (%s) likely too old. Requires firewalld >= 0.2.11" % FW_VERSION)
+
+
+ ## Verify required params are provided
if module.params['source'] == None and module.params['permanent'] == None:
- module.fail(msg='permanent is a required parameter')
+ module.fail_json(msg='permanent is a required parameter')
- if not HAS_FIREWALLD:
- module.fail_json(msg='firewalld required for this module')
+ if module.params['interface'] != None and module.params['zone'] == None:
+ module.fail_json(msg='zone is a required parameter')
- ## Pre-run version checking
- if FW_VERSION < "0.2.11":
- module.fail_json(msg='unsupported version of firewalld, requires >= 2.0.11')
+ if module.params['immediate'] and fw_offline:
+ module.fail_json(msg='firewall is not currently running, unable to perform immediate actions without a running firewall daemon')
## Global Vars
changed=False
@@ -274,20 +541,17 @@ def main():
if module.params['zone'] != None:
zone = module.params['zone']
else:
- zone = fw.getDefaultZone()
+ if fw_offline:
+ zone = fw.get_default_zone()
+ else:
+ zone = fw.getDefaultZone()
permanent = module.params['permanent']
desired_state = module.params['state']
immediate = module.params['immediate']
timeout = module.params['timeout']
-
- ## Check for firewalld running
- try:
- if fw.connected == False:
- module.fail_json(msg='firewalld service must be running')
- except AttributeError:
- module.fail_json(msg="firewalld connection can't be established,\
- version likely too old. Requires firewalld >= 2.0.11")
+ interface = module.params['interface']
+ masquerade = module.params['masquerade']
modification_count = 0
if service != None:
@@ -296,13 +560,66 @@ def main():
modification_count += 1
if rich_rule != None:
modification_count += 1
+ if interface != None:
+ modification_count += 1
+ if masquerade != None:
+ modification_count += 1
if modification_count > 1:
- module.fail_json(msg='can only operate on port, service or rich_rule at once')
+ module.fail_json(msg='can only operate on port, service, rich_rule, masquerade or interface at once')
if service != None:
- if permanent:
- is_enabled = get_service_enabled_permanent(zone, service)
+ if immediate and permanent:
+ is_enabled_permanent = action_handler(
+ get_service_enabled_permanent,
+ (zone, service)
+ )
+ is_enabled_immediate = action_handler(
+ get_service_enabled,
+ (zone, service)
+ )
+ msgs.append('Permanent and Non-Permanent (immediate) operation')
+
+ if desired_state == "enabled":
+ if not is_enabled_permanent or not is_enabled_immediate:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if not is_enabled_permanent:
+ action_handler(
+ set_service_enabled_permanent,
+ (zone, service)
+ )
+ changed=True
+ if not is_enabled_immediate:
+ action_handler(
+ set_service_enabled,
+ (zone, service, timeout)
+ )
+ changed=True
+
+
+ elif desired_state == "disabled":
+ if is_enabled_permanent or is_enabled_immediate:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if is_enabled_permanent:
+ action_handler(
+ set_service_disabled_permanent,
+ (zone, service)
+ )
+ changed=True
+ if is_enabled_immediate:
+ action_handler(
+ set_service_disabled,
+ (zone, service)
+ )
+ changed=True
+
+ elif permanent and not immediate:
+ is_enabled = action_handler(
+ get_service_enabled_permanent,
+ (zone, service)
+ )
msgs.append('Permanent operation')
if desired_state == "enabled":
@@ -310,17 +627,26 @@ def main():
if module.check_mode:
module.exit_json(changed=True)
- set_service_enabled_permanent(zone, service)
+ action_handler(
+ set_service_enabled_permanent,
+ (zone, service)
+ )
changed=True
elif desired_state == "disabled":
if is_enabled == True:
if module.check_mode:
module.exit_json(changed=True)
- set_service_disabled_permanent(zone, service)
+ action_handler(
+ set_service_disabled_permanent,
+ (zone, service)
+ )
changed=True
- if immediate or not permanent:
- is_enabled = get_service_enabled(zone, service)
+ elif immediate and not permanent:
+ is_enabled = action_handler(
+ get_service_enabled,
+ (zone, service)
+ )
msgs.append('Non-permanent operation')
@@ -329,27 +655,35 @@ def main():
if module.check_mode:
module.exit_json(changed=True)
- set_service_enabled(zone, service, timeout)
+ action_handler(
+ set_service_enabled,
+ (zone, service, timeout)
+ )
changed=True
elif desired_state == "disabled":
if is_enabled == True:
if module.check_mode:
module.exit_json(changed=True)
- set_service_disabled(zone, service)
+ action_handler(
+ set_service_disabled,
+ (zone, service)
+ )
changed=True
if changed == True:
msgs.append("Changed service %s to %s" % (service, desired_state))
+ # FIXME - source type does not handle non-permanent mode, this was an
+ # oversight in the past.
if source != None:
- is_enabled = get_source(zone, source)
+ is_enabled = action_handler(get_source, (zone, source))
if desired_state == "enabled":
if is_enabled == False:
if module.check_mode:
module.exit_json(changed=True)
- add_source(zone, source)
+ action_handler(add_source, (zone, source))
changed=True
msgs.append("Added %s to zone %s" % (source, zone))
elif desired_state == "disabled":
@@ -357,12 +691,61 @@ def main():
if module.check_mode:
module.exit_json(changed=True)
- remove_source(zone, source)
+ action_handler(remove_source, (zone, source))
changed=True
msgs.append("Removed %s from zone %s" % (source, zone))
+
if port != None:
- if permanent:
- is_enabled = get_port_enabled_permanent(zone, [port, protocol])
+ if immediate and permanent:
+ is_enabled_permanent = action_handler(
+ get_port_enabled_permanent,
+ (zone, [port, protocol])
+ )
+ is_enabled_immediate = action_handler(
+ get_port_enabled,
+ (zone, [port, protocol])
+ )
+ msgs.append('Permanent and Non-Permanent (immediate) operation')
+
+ if desired_state == "enabled":
+ if not is_enabled_permanent or not is_enabled_immediate:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if not is_enabled_permanent:
+ action_handler(
+ set_port_enabled_permanent,
+ (zone, port, protocol)
+ )
+ changed=True
+ if not is_enabled_immediate:
+ action_handler(
+ set_port_enabled,
+ (zone, port, protocol, timeout)
+ )
+ changed=True
+
+ elif desired_state == "disabled":
+ if is_enabled_permanent or is_enabled_immediate:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if is_enabled_permanent:
+ action_handler(
+ set_port_disabled_permanent,
+ (zone, port, protocol)
+ )
+ changed=True
+ if is_enabled_immediate:
+ action_handler(
+ set_port_disabled,
+ (zone, port, protocol)
+ )
+ changed=True
+
+ elif permanent and not immediate:
+ is_enabled = action_handler(
+ get_port_enabled_permanent,
+ (zone, [port, protocol])
+ )
msgs.append('Permanent operation')
if desired_state == "enabled":
@@ -370,17 +753,26 @@ def main():
if module.check_mode:
module.exit_json(changed=True)
- set_port_enabled_permanent(zone, port, protocol)
+ action_handler(
+ set_port_enabled_permanent,
+ (zone, port, protocol)
+ )
changed=True
elif desired_state == "disabled":
if is_enabled == True:
if module.check_mode:
module.exit_json(changed=True)
- set_port_disabled_permanent(zone, port, protocol)
+ action_handler(
+ set_port_disabled_permanent,
+ (zone, port, protocol)
+ )
changed=True
- if immediate or not permanent:
- is_enabled = get_port_enabled(zone, [port,protocol])
+ elif immediate and not permanent:
+ is_enabled = action_handler(
+ get_port_enabled,
+ (zone, [port, protocol])
+ )
msgs.append('Non-permanent operation')
if desired_state == "enabled":
@@ -388,14 +780,20 @@ def main():
if module.check_mode:
module.exit_json(changed=True)
- set_port_enabled(zone, port, protocol, timeout)
+ action_handler(
+ set_port_enabled,
+ (zone, port, protocol, timeout)
+ )
changed=True
elif desired_state == "disabled":
if is_enabled == True:
if module.check_mode:
module.exit_json(changed=True)
- set_port_disabled(zone, port, protocol)
+ action_handler(
+ set_port_disabled,
+ (zone, port, protocol)
+ )
changed=True
if changed == True:
@@ -403,8 +801,55 @@ def main():
desired_state))
if rich_rule != None:
- if permanent:
- is_enabled = get_rich_rule_enabled_permanent(zone, rich_rule)
+ if immediate and permanent:
+ is_enabled_permanent = action_handler(
+ get_rich_rule_enabled_permanent,
+ (zone, rich_rule)
+ )
+ is_enabled_immediate = action_handler(
+ get_rich_rule_enabled,
+ (zone, rich_rule)
+ )
+ msgs.append('Permanent and Non-Permanent (immediate) operation')
+
+ if desired_state == "enabled":
+ if not is_enabled_permanent or not is_enabled_immediate:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if not is_enabled_permanent:
+ action_handler(
+ set_rich_rule_enabled_permanent,
+ (zone, rich_rule)
+ )
+ changed=True
+ if not is_enabled_immediate:
+ action_handler(
+ set_rich_rule_enabled,
+ (zone, rich_rule, timeout)
+ )
+ changed=True
+
+ elif desired_state == "disabled":
+ if is_enabled_permanent or is_enabled_immediate:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if is_enabled_permanent:
+ action_handler(
+ set_rich_rule_disabled_permanent,
+ (zone, rich_rule)
+ )
+ changed=True
+ if is_enabled_immediate:
+ action_handler(
+ set_rich_rule_disabled,
+ (zone, rich_rule)
+ )
+ changed=True
+ elif permanent and not immediate:
+ is_enabled = action_handler(
+ get_rich_rule_enabled_permanent,
+ (zone, rich_rule)
+ )
msgs.append('Permanent operation')
if desired_state == "enabled":
@@ -412,17 +857,26 @@ def main():
if module.check_mode:
module.exit_json(changed=True)
- set_rich_rule_enabled_permanent(zone, rich_rule)
+ action_handler(
+ set_rich_rule_enabled_permanent,
+ (zone, rich_rule)
+ )
changed=True
elif desired_state == "disabled":
if is_enabled == True:
if module.check_mode:
module.exit_json(changed=True)
- set_rich_rule_disabled_permanent(zone, rich_rule)
+ action_handler(
+ set_rich_rule_disabled_permanent,
+ (zone, rich_rule)
+ )
changed=True
- if immediate or not permanent:
- is_enabled = get_rich_rule_enabled(zone, rich_rule)
+ elif immediate and not permanent:
+ is_enabled = action_handler(
+ get_rich_rule_enabled,
+ (zone, rich_rule)
+ )
msgs.append('Non-permanent operation')
if desired_state == "enabled":
@@ -430,23 +884,189 @@ def main():
if module.check_mode:
module.exit_json(changed=True)
- set_rich_rule_enabled(zone, rich_rule, timeout)
+ action_handler(
+ set_rich_rule_enabled,
+ (zone, rich_rule, timeout)
+ )
changed=True
elif desired_state == "disabled":
if is_enabled == True:
if module.check_mode:
module.exit_json(changed=True)
- set_rich_rule_disabled(zone, rich_rule)
+ action_handler(
+ set_rich_rule_disabled,
+ (zone, rich_rule)
+ )
changed=True
if changed == True:
msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state))
+ if interface != None:
+ if immediate and permanent:
+ is_enabled_permanent = action_handler(
+ get_interface_permanent,
+ (zone, interface)
+ )
+ is_enabled_immediate = action_handler(
+ get_interface,
+ (zone, interface)
+ )
+ msgs.append('Permanent and Non-Permanent (immediate) operation')
+
+ if desired_state == "enabled":
+ if not is_enabled_permanent or not is_enabled_immediate:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if not is_enabled_permanent:
+ change_zone_of_interface_permanent(zone, interface)
+ changed=True
+ if not is_enabled_immediate:
+ change_zone_of_interface(zone, interface)
+ changed=True
+ if changed:
+ msgs.append("Changed %s to zone %s" % (interface, zone))
+
+ elif desired_state == "disabled":
+ if is_enabled_permanent or is_enabled_immediate:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if is_enabled_permanent:
+ remove_interface_permanent(zone, interface)
+ changed=True
+ if is_enabled_immediate:
+ remove_interface(zone, interface)
+ changed=True
+ if changed:
+ msgs.append("Removed %s from zone %s" % (interface, zone))
+
+ elif permanent and not immediate:
+ is_enabled = action_handler(
+ get_interface_permanent,
+ (zone, interface)
+ )
+ msgs.append('Permanent operation')
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ change_zone_of_interface_permanent(zone, interface)
+ changed=True
+ msgs.append("Changed %s to zone %s" % (interface, zone))
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ remove_interface_permanent(zone, interface)
+ changed=True
+ msgs.append("Removed %s from zone %s" % (interface, zone))
+ elif immediate and not permanent:
+ is_enabled = action_handler(
+ get_interface,
+ (zone, interface)
+ )
+ msgs.append('Non-permanent operation')
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ change_zone_of_interface(zone, interface)
+ changed=True
+ msgs.append("Changed %s to zone %s" % (interface, zone))
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ remove_interface(zone, interface)
+ changed=True
+ msgs.append("Removed %s from zone %s" % (interface, zone))
+
+ if masquerade != None:
+
+ if immediate and permanent:
+ is_enabled_permanent = action_handler(
+ get_masquerade_enabled_permanent,
+ (zone,) # one-element tuple; action_handler expects an args tuple
+ )
+ is_enabled_immediate = action_handler(get_masquerade_enabled, (zone,))
+ msgs.append('Permanent and Non-Permanent (immediate) operation')
+
+ if desired_state == "enabled":
+ if not is_enabled_permanent or not is_enabled_immediate:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if not is_enabled_permanent:
+ action_handler(set_masquerade_permanent, (zone, True))
+ changed=True
+ if not is_enabled_immediate:
+ action_handler(set_masquerade_enabled, (zone,))
+ changed=True
+ if changed:
+ msgs.append("Added masquerade to zone %s" % (zone))
+
+ elif desired_state == "disabled":
+ if is_enabled_permanent or is_enabled_immediate:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if is_enabled_permanent:
+ action_handler(set_masquerade_permanent, (zone, False))
+ changed=True
+ if is_enabled_immediate:
+ action_handler(set_masquerade_disabled, (zone,))
+ changed=True
+ if changed:
+ msgs.append("Removed masquerade from zone %s" % (zone))
+
+ elif permanent and not immediate:
+ is_enabled = action_handler(get_masquerade_enabled_permanent, (zone,))
+ msgs.append('Permanent operation')
+
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ action_handler(set_masquerade_permanent, (zone, True))
+ changed=True
+ msgs.append("Added masquerade to zone %s" % (zone))
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ action_handler(set_masquerade_permanent, (zone, False))
+ changed=True
+ msgs.append("Removed masquerade from zone %s" % (zone))
+ elif immediate and not permanent:
+ is_enabled = action_handler(get_masquerade_enabled, (zone,))
+ msgs.append('Non-permanent operation')
+
+ if desired_state == "enabled":
+ if is_enabled == False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ action_handler(set_masquerade_enabled, (zone,))
+ changed=True
+ msgs.append("Added masquerade to zone %s" % (zone))
+ elif desired_state == "disabled":
+ if is_enabled == True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ action_handler(set_masquerade_disabled, (zone,))
+ changed=True
+ msgs.append("Removed masquerade from zone %s" % (zone))
+
+ if fw_offline:
+ msgs.append("(offline operation: only on-disk configs were altered)")
module.exit_json(changed=changed, msg=', '.join(msgs))
-#################################################
-# import module snippets
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/getent.py b/system/getent.py
index 7df9e1d795f..960a1221f70 100644
--- a/system/getent.py
+++ b/system/getent.py
@@ -20,6 +20,10 @@
#
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: getent
@@ -59,27 +63,46 @@
EXAMPLES = '''
# get root user info
-- getent: database=passwd key=root
-- debug: var=getent_passwd
+- getent:
+ database: passwd
+ key: root
+- debug:
+ var: getent_passwd
# get all groups
-- getent: database=group split=':'
-- debug: var=getent_group
+- getent:
+ database: group
+ split: ':'
+- debug:
+ var: getent_group
# get all hosts, split by tab
-- getent: database=hosts
-- debug: var=getent_hosts
+- getent:
+ database: hosts
+- debug:
+ var: getent_hosts
# get http service info, no error if missing
-- getent: database=services key=http fail_key=False
-- debug: var=getent_services
+- getent:
+ database: services
+ key: http
+ fail_key: False
+- debug:
+ var: getent_services
# get user password hash (requires sudo/root)
-- getent: database=shadow key=www-data split=:
-- debug: var=getent_shadow
+- getent:
+ database: shadow
+ key: www-data
+ split: ':'
+- debug:
+ var: getent_shadow
'''
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
def main():
module = AnsibleModule(
argument_spec = dict(
@@ -110,7 +133,8 @@ def main():
try:
rc, out, err = module.run_command(cmd)
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg=str(e))
msg = "Unexpected failure!"
@@ -136,8 +160,6 @@ def main():
module.fail_json(msg=msg)
-# import module snippets
-from ansible.module_utils.basic import *
-
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/gluster_volume.py b/system/gluster_volume.py
index ff1ce9831db..7fcca45886d 100644
--- a/system/gluster_volume.py
+++ b/system/gluster_volume.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
module: gluster_volume
short_description: Manage GlusterFS volumes
@@ -56,6 +60,18 @@
default: null
description:
- Stripe count for volume
+ disperses:
+ required: false
+ default: null
+ description:
+ - Disperse count for volume
+ version_added: "2.2"
+ redundancies:
+ required: false
+ default: null
+ description:
+ - Redundancy count for volume
+ version_added: "2.2"
transport:
required: false
choices: [ 'tcp', 'rdma', 'tcp,rdma' ]
@@ -71,6 +87,7 @@
start_on_create:
choices: [ 'yes', 'no']
required: false
+ default: 'yes'
description:
- Controls whether the volume is started after creation or not, defaults to yes
rebalance:
@@ -108,32 +125,61 @@
EXAMPLES = """
- name: create gluster volume
- gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster="192.168.1.10,192.168.1.11"
+ gluster_volume:
+ state: present
+ name: test1
+ bricks: /bricks/brick1/g1
+ rebalance: yes
+ cluster:
+ - 192.0.2.10
+ - 192.0.2.11
run_once: true
- name: tune
- gluster_volume: state=present name=test1 options='{performance.cache-size: 256MB}'
+ gluster_volume:
+ state: present
+ name: test1
+ options:
+ performance.cache-size: 256MB
- name: start gluster volume
- gluster_volume: state=started name=test1
+ gluster_volume:
+ state: started
+ name: test1
- name: limit usage
- gluster_volume: state=present name=test1 directory=/foo quota=20.0MB
+ gluster_volume:
+ state: present
+ name: test1
+ directory: /foo
+ quota: 20.0MB
- name: stop gluster volume
- gluster_volume: state=stopped name=test1
+ gluster_volume:
+ state: stopped
+ name: test1
- name: remove gluster volume
- gluster_volume: state=absent name=test1
+ gluster_volume:
+ state: absent
+ name: test1
- name: create gluster volume with multiple bricks
- gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster="192.168.1.10,192.168.1.11"
+ gluster_volume:
+ state: present
+ name: test2
+ bricks: /bricks/brick1/g2,/bricks/brick2/g2
+ cluster:
+ - 192.0.2.10
+ - 192.0.2.11
run_once: true
"""
import shutil
import time
import socket
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.basic import *
glusterbin = ''
@@ -146,7 +192,8 @@ def run_gluster(gargs, **kwargs):
rc, out, err = module.run_command(args, **kwargs)
if rc != 0:
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
- except Exception, e:
+ except Exception:
+ e = get_exception()
module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e)))
return out
@@ -177,16 +224,24 @@ def get_peers():
hostname = None
uuid = None
state = None
+ shortNames = False
for row in out.split('\n'):
if ': ' in row:
key, value = row.split(': ')
if key.lower() == 'hostname':
hostname = value
+ shortNames = False
if key.lower() == 'uuid':
uuid = value
if key.lower() == 'state':
state = value
peers[hostname] = [ uuid, state ]
+ elif row.lower() == 'other names:':
+ shortNames = True
+ elif row != '' and shortNames == True:
+ peers[row] = [ uuid, state ]
+ elif row == '':
+ shortNames = False
return peers
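+# Example 'gluster peer status' stanza the parser above understands
+# (hypothetical values); entries under 'Other names:' become extra keys in
+# the peers dict pointing at the same [uuid, state] pair:
+#
+# Hostname: gluster2.example.com
+# Uuid: 2a0f9a6c-1f27-4a8e-9d96-0f6c24f2a9aa
+# State: Peer in Cluster (Connected)
+# Other names:
+# gluster2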
def get_volumes():
@@ -249,8 +304,8 @@ def wait_for_peer(host):
def probe(host, myhostname):
global module
- run_gluster([ 'peer', 'probe', host ])
- if not wait_for_peer(host):
+ out = run_gluster([ 'peer', 'probe', host ])
+ if out.find('localhost') == -1 and not wait_for_peer(host):
module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname))
changed = True
@@ -258,11 +313,9 @@ def probe_all_peers(hosts, peers, myhostname):
for host in hosts:
host = host.strip() # Clean up any extra space for exact comparison
if host not in peers:
- # dont probe ourselves
- if myhostname != host:
- probe(host, myhostname)
+ probe(host, myhostname)
-def create_volume(name, stripe, replica, transport, hosts, bricks, force):
+def create_volume(name, stripe, replica, disperse, redundancy, transport, hosts, bricks, force):
args = [ 'volume', 'create' ]
args.append(name)
if stripe:
@@ -271,6 +324,12 @@ def create_volume(name, stripe, replica, transport, hosts, bricks, force):
if replica:
args.append('replica')
args.append(str(replica))
+ if disperse:
+ args.append('disperse')
+ args.append(str(disperse))
+ if redundancy:
+ args.append('redundancy')
+ args.append(str(redundancy))
args.append('transport')
args.append(transport)
for brick in bricks:
@@ -289,8 +348,15 @@ def stop_volume(name):
def set_volume_option(name, option, parameter):
run_gluster([ 'volume', 'set', name, option, parameter ])
-def add_brick(name, brick, force):
- args = [ 'volume', 'add-brick', name, brick ]
+def add_bricks(name, new_bricks, stripe, replica, force):
+ args = [ 'volume', 'add-brick', name ]
+ if stripe:
+ args.append('stripe')
+ args.append(str(stripe))
+ if replica:
+ args.append('replica')
+ args.append(str(replica))
+ args.extend(new_bricks)
if force:
args.append('force')
run_gluster(args)
@@ -317,6 +383,8 @@ def main():
host=dict(required=False, default=None),
stripes=dict(required=False, default=None, type='int'),
replicas=dict(required=False, default=None, type='int'),
+ disperses=dict(required=False, default=None, type='int'),
+ redundancies=dict(required=False, default=None, type='int'),
transport=dict(required=False, default='tcp', choices=[ 'tcp', 'rdma', 'tcp,rdma' ]),
bricks=dict(required=False, default=None, aliases=['brick']),
start_on_create=dict(required=False, default=True, type='bool'),
@@ -339,6 +407,8 @@ def main():
brick_paths = module.params['bricks']
stripes = module.params['stripes']
replicas = module.params['replicas']
+ disperses = module.params['disperses']
+ redundancies = module.params['redundancies']
transport = module.params['transport']
myhostname = module.params['host']
start_on_create = module.boolean(module.params['start_on_create'])
@@ -350,9 +420,12 @@ def main():
# Clean up if last element is empty. Consider that yml can look like this:
# cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
- if cluster != None and cluster[-1] == '':
+ if cluster != None and len(cluster) > 1 and cluster[-1] == '':
cluster = cluster[0:-1]
+ if cluster == None or len(cluster) == 0 or cluster[0] == '': # guard empty list before indexing
+ cluster = [myhostname]
+
if brick_paths != None and "," in brick_paths:
brick_paths = brick_paths.split(",")
else:
@@ -383,7 +456,7 @@ def main():
# create if it doesn't exist
if volume_name not in volumes:
- create_volume(volume_name, stripes, replicas, transport, cluster, brick_paths, force)
+ create_volume(volume_name, stripes, replicas, disperses, redundancies, transport, cluster, brick_paths, force)
volumes = get_volumes()
changed = True
@@ -408,8 +481,8 @@ def main():
if brick not in all_bricks:
removed_bricks.append(brick)
- for brick in new_bricks:
- add_brick(volume_name, brick, force)
+ if new_bricks:
+ add_bricks(volume_name, new_bricks, stripes, replicas, force)
changed = True
# handle quotas
@@ -430,7 +503,7 @@ def main():
else:
module.fail_json(msg='failed to create volume %s' % volume_name)
- if volume_name not in volumes:
+ if action != 'delete' and volume_name not in volumes:
module.fail_json(msg='volume not found %s' % volume_name)
if action == 'started':
@@ -453,6 +526,5 @@ def main():
module.exit_json(changed=changed, ansible_facts=facts)
-# import module snippets
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/iptables.py b/system/iptables.py
new file mode 100644
index 00000000000..521ad6b043a
--- /dev/null
+++ b/system/iptables.py
@@ -0,0 +1,564 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Linus Unnebäck
+#
+# This file is part of Ansible
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see .
+
+BINS = dict(
+ ipv4='iptables',
+ ipv6='ip6tables',
+)
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: iptables
+short_description: Modify the system's iptables
+requirements: []
+version_added: "2.0"
+author: Linus Unnebäck (@LinusU)
+description:
+ - Iptables is used to set up, maintain, and inspect the tables of IP packet
+ filter rules in the Linux kernel. This module does not handle the saving
+ and/or loading of rules, but rather only manipulates the current rules
+ that are present in memory. This is the same as the behaviour of the
+ "iptables" and "ip6tables" command which this module uses internally.
+notes:
+ - This module just deals with individual rules. If you need advanced
+ chaining of rules, the recommended way is to template the iptables restore
+ file.
+options:
+ table:
+ description:
+ - This option specifies the packet matching table which the command
+ should operate on. If the kernel is configured with automatic module
+ loading, an attempt will be made to load the appropriate module for
+ that table if it is not already there.
+ required: false
+ default: filter
+ choices: [ "filter", "nat", "mangle", "raw", "security" ]
+ state:
+ description:
+ - Whether the rule should be absent or present.
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ action:
+ version_added: "2.2"
+ description:
+ - Whether the rule should be appended at the bottom or inserted at the
+ top. If the rule already exists the chain won't be modified.
+ required: false
+ default: append
+ choices: [ "append", "insert" ]
+ ip_version:
+ description:
+ - Which version of the IP protocol this rule should apply to.
+ required: false
+ default: ipv4
+ choices: [ "ipv4", "ipv6" ]
+ chain:
+ description:
+ - "Chain to operate on. This option can either be the name of a user
+ defined chain or any of the builtin chains: 'INPUT', 'FORWARD',
+ 'OUTPUT', 'PREROUTING', 'POSTROUTING'."
+ required: false
+ protocol:
+ description:
+ - The protocol of the rule or of the packet to check. The specified
+ protocol can be one of tcp, udp, udplite, icmp, esp, ah, sctp or the
+ special keyword "all", or it can be a numeric value, representing one
+ of these protocols or a different one. A protocol name from
+ /etc/protocols is also allowed. A "!" argument before the protocol
+ inverts the test. The number zero is equivalent to all. "all" will
+ match with all protocols and is taken as default when this option is
+ omitted.
+ required: false
+ default: null
+ source:
+ description:
+ - Source specification. Address can be either a network name,
+ a hostname, a network IP address (with /mask), or a plain IP address.
+ Hostnames will be resolved once only, before the rule is submitted to
+ the kernel. Please note that specifying any name to be resolved with
+ a remote query such as DNS is a really bad idea. The mask can be
+ either a network mask or a plain number, specifying the number of 1's
+ at the left side of the network mask. Thus, a mask of 24 is equivalent
+ to 255.255.255.0. A "!" argument before the address specification
+ inverts the sense of the address.
+ required: false
+ default: null
+ destination:
+ description:
+ - Destination specification. Address can be either a network name,
+ a hostname, a network IP address (with /mask), or a plain IP address.
+ Hostnames will be resolved once only, before the rule is submitted to
+ the kernel. Please note that specifying any name to be resolved with
+ a remote query such as DNS is a really bad idea. The mask can be
+ either a network mask or a plain number, specifying the number of 1's
+ at the left side of the network mask. Thus, a mask of 24 is equivalent
+ to 255.255.255.0. A "!" argument before the address specification
+ inverts the sense of the address.
+ required: false
+ default: null
+ match:
+ description:
+ - Specifies a match to use, that is, an extension module that tests for
+ a specific property. The set of matches make up the condition under
+ which a target is invoked. Matches are evaluated first to last if
+ specified as an array and work in short-circuit fashion, i.e. if one
+ extension yields false, evaluation will stop.
+ required: false
+ default: []
+ jump:
+ description:
+ - This specifies the target of the rule; i.e., what to do if the packet
+ matches it. The target can be a user-defined chain (other than the one
+ this rule is in), one of the special builtin targets which decide the
+ fate of the packet immediately, or an extension (see EXTENSIONS
+ below). If this option is omitted in a rule (and the goto parameter
+ is not used), then matching the rule will have no effect on the
+ packet's fate, but the counters on the rule will be incremented.
+ required: false
+ default: null
+ goto:
+ description:
+ - This specifies that the processing should continue in a user specified
+ chain. Unlike the jump argument, return will not continue processing in
+ this chain but instead in the chain that called us via jump.
+ required: false
+ default: null
+ in_interface:
+ description:
+ - Name of an interface via which a packet was received (only for packets
+ entering the INPUT, FORWARD and PREROUTING chains). When the "!"
+ argument is used before the interface name, the sense is inverted. If
+ the interface name ends in a "+", then any interface which begins with
+ this name will match. If this option is omitted, any interface name
+ will match.
+ required: false
+ default: null
+ out_interface:
+ description:
+ - Name of an interface via which a packet is going to be sent (for
+ packets entering the FORWARD, OUTPUT and POSTROUTING chains). When the
+ "!" argument is used before the interface name, the sense is inverted.
+ If the interface name ends in a "+", then any interface which begins
+ with this name will match. If this option is omitted, any interface
+ name will match.
+ required: false
+ default: null
+ fragment:
+ description:
+ - This means that the rule only refers to second and further fragments
+ of fragmented packets. Since there is no way to tell the source or
+ destination ports of such a packet (or ICMP type), such a packet will
+ not match any rules which specify them. When the "!" argument precedes
+ the fragment argument, the rule will only match head fragments, or
+ unfragmented packets.
+ required: false
+ default: null
+ set_counters:
+ description:
+ - This enables the administrator to initialize the packet and byte
+ counters of a rule (during INSERT, APPEND, REPLACE operations).
+ required: false
+ default: null
+ source_port:
+ description:
+ - "Source port or port range specification. This can either be a service
+ name or a port number. An inclusive range can also be specified, using
+ the format first:last. If the first port is omitted, '0' is assumed;
+ if the last is omitted, '65535' is assumed. If the first port is
+ greater than the second one they will be swapped."
+ required: false
+ default: null
+ destination_port:
+ description:
+ - "Destination port or port range specification. This can either be
+ a service name or a port number. An inclusive range can also be
+ specified, using the format first:last. If the first port is omitted,
+ '0' is assumed; if the last is omitted, '65535' is assumed. If the
+ first port is greater than the second one they will be swapped."
+ required: false
+ default: null
+ to_ports:
+ description:
+ - "This specifies a destination port or range of ports to use: without
+ this, the destination port is never altered. This is only valid if the
+ rule also specifies one of the following protocols: tcp, udp, dccp or
+ sctp."
+ required: false
+ default: null
+ to_destination:
+ version_added: "2.1"
+ description:
+ - "This specifies a destination address to use with DNAT: without
+ this, the destination address is never altered."
+ required: false
+ default: null
+ to_source:
+ version_added: "2.2"
+ description:
+ - "This specifies a source address to use with SNAT: without
+ this, the source address is never altered."
+ required: false
+ default: null
+ set_dscp_mark:
+ version_added: "2.1"
+ description:
+ - "This allows specifying a DSCP mark to be added to packets.
+ It takes either an integer or hex value. Mutually exclusive with
+ C(set_dscp_mark_class)."
+ required: false
+ default: null
+ set_dscp_mark_class:
+ version_added: "2.1"
+ description:
+ - "This allows specifying a predefined DiffServ class which will be
+ translated to the corresponding DSCP mark. Mutually exclusive with
+ C(set_dscp_mark)."
+ required: false
+ default: null
+ comment:
+ description:
+ - "This specifies a comment that will be added to the rule"
+ required: false
+ default: null
+ ctstate:
+ description:
+ - "ctstate is a list of the connection states to match in the conntrack
+ module.
+ Possible states are: 'INVALID', 'NEW', 'ESTABLISHED', 'RELATED',
+ 'UNTRACKED', 'SNAT', 'DNAT'"
+ required: false
+ default: []
+ limit:
+ description:
+ - "Specifies the maximum average number of matches to allow per second.
+ The number can specify units explicitly, using `/second', `/minute',
+ `/hour' or `/day', or parts of them (so `5/second' is the same as
+ `5/s')."
+ required: false
+ default: null
+ limit_burst:
+ version_added: "2.1"
+ description:
+ - "Specifies the maximum burst before the above limit kicks in."
+ required: false
+ default: null
+ uid_owner:
+ version_added: "2.1"
+ description:
+ - "Specifies the UID or username to use in match by owner rule."
+ required: false
+ reject_with:
+ version_added: "2.1"
+ description:
+ - "Specifies the error packet type to return while rejecting."
+ required: false
+ icmp_type:
+ version_added: "2.2"
+ description:
+ - "This allows specification of the ICMP type, which can be a numeric
+ ICMP type, type/code pair, or one of the ICMP type names shown by the
+ command 'iptables -p icmp -h'"
+ required: false
+ flush:
+ version_added: "2.2"
+ description:
+ - "Flushes the specified table and chain of all rules. If no chain is
+ specified then the entire table is purged. Ignores all other
+ parameters."
+ required: false
+ policy:
+ version_added: "2.2"
+ description:
+ - "Set the policy for the chain to the given target. Valid targets are
+ ACCEPT, DROP, QUEUE, RETURN. Only built in chains can have policies.
+ This parameter requires the chain parameter. Ignores all other
+ parameters."
+'''
+
+EXAMPLES = '''
+# Block specific IP
+- iptables:
+ chain: INPUT
+ source: 8.8.8.8
+ jump: DROP
+ become: yes
+
+# Forward port 80 to 8600
+- iptables:
+ table: nat
+ chain: PREROUTING
+ in_interface: eth0
+ protocol: tcp
+ match: tcp
+ destination_port: 80
+ jump: REDIRECT
+ to_ports: 8600
+ comment: Redirect web traffic to port 8600
+ become: yes
+
+# Allow related and established connections
+- iptables:
+ chain: INPUT
+ ctstate: ESTABLISHED,RELATED
+ jump: ACCEPT
+ become: yes
+
+# Tag all outbound tcp packets with DSCP mark 8
+- iptables:
+ chain: OUTPUT
+ jump: DSCP
+ table: mangle
+ set_dscp_mark: 8
+ protocol: tcp
+
+# Tag all outbound tcp packets with DSCP DiffServ class CS1
+- iptables:
+ chain: OUTPUT
+ jump: DSCP
+ table: mangle
+ set_dscp_mark_class: CS1
+ protocol: tcp
+'''
+
+def append_param(rule, param, flag, is_list):
+ if is_list:
+ for item in param:
+ append_param(rule, item, flag, False)
+ else:
+ if param is not None:
+ rule.extend([flag, param])
+
+
+def append_csv(rule, param, flag):
+ if param:
+ rule.extend([flag, ','.join(param)])
+
+
+def append_match(rule, param, match):
+ if param:
+ rule.extend(['-m', match])
+
+
+def append_jump(rule, param, jump):
+ if param:
+ rule.extend(['-j', jump])
+
+
+def construct_rule(params):
+ rule = []
+ append_param(rule, params['protocol'], '-p', False)
+ append_param(rule, params['source'], '-s', False)
+ append_param(rule, params['destination'], '-d', False)
+ append_param(rule, params['match'], '-m', True)
+ append_param(rule, params['jump'], '-j', False)
+ append_param(rule, params['to_destination'], '--to-destination', False)
+ append_param(rule, params['to_source'], '--to-source', False)
+ append_param(rule, params['goto'], '-g', False)
+ append_param(rule, params['in_interface'], '-i', False)
+ append_param(rule, params['out_interface'], '-o', False)
+ append_param(rule, params['fragment'], '-f', False)
+ append_param(rule, params['set_counters'], '-c', False)
+ append_param(rule, params['source_port'], '--source-port', False)
+ append_param(rule, params['destination_port'], '--destination-port', False)
+ append_param(rule, params['to_ports'], '--to-ports', False)
+ append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
+ append_param(
+ rule,
+ params['set_dscp_mark_class'],
+ '--set-dscp-class',
+ False)
+ append_match(rule, params['comment'], 'comment')
+ append_param(rule, params['comment'], '--comment', False)
+ append_match(rule, params['ctstate'], 'state')
+ append_csv(rule, params['ctstate'], '--state')
+ append_match(rule, params['limit'] or params['limit_burst'], 'limit')
+ append_param(rule, params['limit'], '--limit', False)
+ append_param(rule, params['limit_burst'], '--limit-burst', False)
+ append_match(rule, params['uid_owner'], 'owner')
+ append_param(rule, params['uid_owner'], '--uid-owner', False)
+ append_jump(rule, params['reject_with'], 'REJECT')
+ append_param(rule, params['reject_with'], '--reject-with', False)
+ append_param(rule, params['icmp_type'], '--icmp-type', False)
+ return rule
+
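+# Illustrative sketch (not part of the module): with hypothetical params
+# protocol='tcp', match=['tcp'], jump='ACCEPT', destination_port='80' and
+# everything else left at its default, construct_rule() returns
+# ['-p', 'tcp', '-m', 'tcp', '-j', 'ACCEPT', '--destination-port', '80'].
+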
+
+def push_arguments(iptables_path, action, params, make_rule=True):
+ cmd = [iptables_path]
+ cmd.extend(['-t', params['table']])
+ cmd.extend([action, params['chain']])
+ if make_rule:
+ cmd.extend(construct_rule(params))
+ return cmd
+
+
+def check_present(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-C', params)
+ rc, _, __ = module.run_command(cmd, check_rc=False)
+ return (rc == 0)
+
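+# check_present relies on the iptables -C/--check flag, which exits 0 when
+# an exactly matching rule exists and non-zero otherwise (available since
+# iptables 1.4.11).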
+
+def append_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-A', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def insert_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-I', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def remove_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-D', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def flush_table(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-F', params, make_rule=False)
+ module.run_command(cmd, check_rc=True)
+
+
+def set_chain_policy(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-P', params, make_rule=False)
+ cmd.append(params['policy'])
+ module.run_command(cmd, check_rc=True)
+
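+# For example, hypothetical params table='filter', chain='INPUT' and
+# policy='DROP' make set_chain_policy run: iptables -t filter -P INPUT DROP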
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ table=dict(
+ required=False,
+ default='filter',
+ choices=['filter', 'nat', 'mangle', 'raw', 'security']),
+ state=dict(
+ required=False,
+ default='present',
+ choices=['present', 'absent']),
+ action=dict(
+ required=False,
+ default='append',
+ type='str',
+ choices=['append', 'insert']),
+ ip_version=dict(
+ required=False,
+ default='ipv4',
+ choices=['ipv4', 'ipv6']),
+ chain=dict(required=False, default=None, type='str'),
+ protocol=dict(required=False, default=None, type='str'),
+ source=dict(required=False, default=None, type='str'),
+ to_source=dict(required=False, default=None, type='str'),
+ destination=dict(required=False, default=None, type='str'),
+ to_destination=dict(required=False, default=None, type='str'),
+ match=dict(required=False, default=[], type='list'),
+ jump=dict(required=False, default=None, type='str'),
+ goto=dict(required=False, default=None, type='str'),
+ in_interface=dict(required=False, default=None, type='str'),
+ out_interface=dict(required=False, default=None, type='str'),
+ fragment=dict(required=False, default=None, type='str'),
+ set_counters=dict(required=False, default=None, type='str'),
+ source_port=dict(required=False, default=None, type='str'),
+ destination_port=dict(required=False, default=None, type='str'),
+ to_ports=dict(required=False, default=None, type='str'),
+ set_dscp_mark=dict(required=False, default=None, type='str'),
+ set_dscp_mark_class=dict(required=False, default=None, type='str'),
+ comment=dict(required=False, default=None, type='str'),
+ ctstate=dict(required=False, default=[], type='list'),
+ limit=dict(required=False, default=None, type='str'),
+ limit_burst=dict(required=False, default=None, type='str'),
+ uid_owner=dict(required=False, default=None, type='str'),
+ reject_with=dict(required=False, default=None, type='str'),
+ icmp_type=dict(required=False, default=None, type='str'),
+ flush=dict(required=False, default=False, type='bool'),
+ policy=dict(
+ required=False,
+ default=None,
+ type='str',
+ choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
+ ),
+ mutually_exclusive=(
+ ['set_dscp_mark', 'set_dscp_mark_class'],
+ ['flush', 'policy'],
+ ),
+ )
+ args = dict(
+ changed=False,
+ failed=False,
+ ip_version=module.params['ip_version'],
+ table=module.params['table'],
+ chain=module.params['chain'],
+ flush=module.params['flush'],
+ rule=' '.join(construct_rule(module.params)),
+ state=module.params['state'],
+ )
+
+ ip_version = module.params['ip_version']
+ iptables_path = module.get_bin_path(BINS[ip_version], True)
+
+ # Check if chain option is required
+ if args['flush'] is False and args['chain'] is None:
+ module.fail_json(
+ msg="Either chain or flush parameter must be specified.")
+
+ # Flush the table
+ if args['flush'] is True:
+ flush_table(iptables_path, module, module.params)
+ module.exit_json(**args)
+
+ # Set the policy
+ if module.params['policy']:
+ set_chain_policy(iptables_path, module, module.params)
+ module.exit_json(**args)
+
+ insert = (module.params['action'] == 'insert')
+ rule_is_present = check_present(iptables_path, module, module.params)
+ should_be_present = (args['state'] == 'present')
+
+ # Check if target is up to date
+ args['changed'] = (rule_is_present != should_be_present)
+
+ # Check only; don't modify
+ if module.check_mode:
+ module.exit_json(changed=args['changed'])
+
+ # Target is already up to date
+ if args['changed'] is False:
+ module.exit_json(**args)
+
+ if should_be_present:
+ if insert:
+ insert_rule(iptables_path, module, module.params)
+ else:
+ append_rule(iptables_path, module, module.params)
+ else:
+ remove_rule(iptables_path, module, module.params)
+
+ module.exit_json(**args)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/system/kernel_blacklist.py b/system/kernel_blacklist.py
index 296a082a2ea..5498f10b3a1 100644
--- a/system/kernel_blacklist.py
+++ b/system/kernel_blacklist.py
@@ -22,6 +22,10 @@
import re
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: kernel_blacklist
@@ -52,7 +56,9 @@
EXAMPLES = '''
# Blacklist the nouveau driver module
-- kernel_blacklist: name=nouveau state=present
+- kernel_blacklist:
+ name: nouveau
+ state: present
'''
@@ -138,4 +144,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/system/known_hosts.py b/system/known_hosts.py
index 7592574d4e7..69210d9fdf2 100644
--- a/system/known_hosts.py
+++ b/system/known_hosts.py
@@ -18,14 +18,19 @@
along with this module. If not, see .
"""
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: known_hosts
short_description: Add or remove a host from the C(known_hosts) file
description:
- - The M(known_hosts) module lets you add or remove a host from the C(known_hosts) file.
- This is useful if you're going to want to use the M(git) module over ssh, for example.
- If you have a very large number of host keys to manage, you will find the M(template) module more useful.
+ - The M(known_hosts) module lets you add or remove host keys from the C(known_hosts) file.
+ - Starting with Ansible 2.2, multiple entries per host are allowed, but only one for each key type supported by ssh.
+ This is useful if you're going to want to use the M(git) module over ssh, for example.
+ - If you have a very large number of host keys to manage, you will find the M(template) module more useful.
version_added: "1.9"
options:
name:
@@ -36,7 +41,7 @@
default: null
key:
description:
- - The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed)
+ - The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed). The key must be in the right format for ssh (see sshd(8), section "SSH_KNOWN_HOSTS FILE FORMAT")
required: false
default: null
path:
@@ -44,9 +49,15 @@
- The known_hosts file to edit
required: no
default: "(homedir)+/.ssh/known_hosts"
+ hash_host:
+ description:
+ - Hash the hostname in the known_hosts file
+ required: no
+ default: no
+ version_added: "2.3"
state:
description:
- - I(present) to add the host, I(absent) to remove it.
+ - I(present) to add the host key, I(absent) to remove it.
choices: [ "present", "absent" ]
required: no
default: present
@@ -55,11 +66,11 @@
'''
EXAMPLES = '''
-# Example using with_file to set the system known_hosts file
- name: tell the host about our servers it might want to ssh to
- known_hosts: path='/etc/ssh/ssh_known_hosts'
- name='foo.com.invalid'
- key="{{ lookup('file', 'pubkeys/foo.com.invalid') }}"
+ known_hosts:
+ path: /etc/ssh/ssh_known_hosts
+ name: foo.com.invalid
+ key: "{{ lookup('file', 'pubkeys/foo.com.invalid') }}"
'''
# Makes sure public host keys are present or absent in the given known_hosts
@@ -70,12 +81,16 @@
# name = hostname whose key should be added (alias: host)
# key = line(s) to add to known_hosts file
# path = the known_hosts file to edit (default: ~/.ssh/known_hosts)
+# hash_host = yes|no (default: no) hash the hostname in the known_hosts file
# state = absent|present (default: present)
import os
import os.path
import tempfile
import errno
+import re
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.basic import *
def enforce_state(module, params):
"""
@@ -85,15 +100,14 @@ def enforce_state(module, params):
host = params["name"]
key = params.get("key",None)
port = params.get("port",None)
- #expand the path parameter; otherwise module.add_path_info
- #(called by exit_json) unhelpfully says the unexpanded path is absent.
- path = os.path.expanduser(params.get("path"))
+ path = params.get("path")
+ hash_host = params.get("hash_host")
state = params.get("state")
#Find the ssh-keygen binary
sshkeygen = module.get_bin_path("ssh-keygen",True)
- #trailing newline in files gets lost, so re-add if necessary
- if key is not None and key[-1]!='\n':
+ # Trailing newline in files gets lost, so re-add if necessary
+ if key and key[-1] != '\n':
key+='\n'
if key is None and state != "absent":
@@ -101,27 +115,28 @@ def enforce_state(module, params):
sanity_check(module,host,key,sshkeygen)
- current,replace=search_for_host_key(module,host,key,path,sshkeygen)
+ found,replace_or_add,found_line,key=search_for_host_key(module,host,key,hash_host,path,sshkeygen)
- #We will change state if current==True & state!="present"
- #or current==False & state=="present"
- #i.e (current) XOR (state=="present")
+ #We will change state if found==True & state!="present"
+ #or found==False & state=="present"
+ #i.e found XOR (state=="present")
#Alternatively, if replace is true (i.e. key present, and we must change it)
if module.check_mode:
- module.exit_json(changed = replace or ((state=="present") != current))
+ module.exit_json(changed = replace_or_add or ((state=="present") != found))
#Now do the work.
- #First, remove an extant entry if required
- if replace==True or (current==True and state=="absent"):
- module.run_command([sshkeygen,'-R',host,'-f',path],
- check_rc=True)
+ #Only remove whole host if found and no key provided
+ if found and key is None and state=="absent":
+ module.run_command([sshkeygen,'-R',host,'-f',path], check_rc=True)
params['changed'] = True
+
#Next, add a new (or replacing) entry
- if replace==True or (current==False and state=="present"):
+ if replace_or_add or found != (state=="present"):
try:
inf=open(path,"r")
- except IOError, e:
+ except IOError:
+ e = get_exception()
if e.errno == errno.ENOENT:
inf=None
else:
@@ -130,13 +145,17 @@ def enforce_state(module, params):
try:
outf=tempfile.NamedTemporaryFile(dir=os.path.dirname(path))
if inf is not None:
- for line in inf:
+ for line_number, line in enumerate(inf, start=1):
+ if found_line==line_number and (replace_or_add or state=='absent'):
+ continue # skip this line to replace its key
outf.write(line)
inf.close()
- outf.write(key)
+ if state == 'present':
+ outf.write(key)
outf.flush()
module.atomic_move(outf.name,path)
- except (IOError,OSError),e:
+ except (IOError,OSError):
+ e = get_exception()
module.fail_json(msg="Failed to write to file %s: %s" % \
(path,str(e)))
@@ -170,7 +189,8 @@ def sanity_check(module,host,key,sshkeygen):
outf=tempfile.NamedTemporaryFile()
outf.write(key)
outf.flush()
- except IOError,e:
+ except IOError:
+ e = get_exception()
module.fail_json(msg="Failed to write to temporary file %s: %s" % \
(outf.name,str(e)))
rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,
@@ -184,55 +204,94 @@ def sanity_check(module,host,key,sshkeygen):
if stdout=='': #host not found
module.fail_json(msg="Host parameter does not match hashed host field in supplied key")
-def search_for_host_key(module,host,key,path,sshkeygen):
- '''search_for_host_key(module,host,key,path,sshkeygen) -> (current,replace)
+def search_for_host_key(module,host,key,hash_host,path,sshkeygen):
+ '''search_for_host_key(module,host,key,hash_host,path,sshkeygen) -> (found,replace_or_add,found_line,key)
- Looks up host in the known_hosts file path; if it's there, looks to see
+ Looks up host and keytype in the known_hosts file path; if it's there, looks to see
if one of those entries matches key. Returns:
- current (Boolean): is host found in path?
- replace (Boolean): is the key in path different to that supplied by user?
- if current=False, then replace is always False.
+ found (Boolean): is host found in path?
+ replace_or_add (Boolean): is the key in path different to that supplied by user?
+ found_line (int or None): the line where a key of the same type was found
+ if found=False, then replace_or_add is always False.
sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
'''
- replace=False
if os.path.exists(path)==False:
- return False, False
+ return False, False, None, key
+
+ sshkeygen_command=[sshkeygen,'-F',host,'-f',path]
+
#openssh >=6.4 has changed ssh-keygen behaviour such that it returns
#1 if no host is found, whereas previously it returned 0
- rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,'-f',path],
+ rc,stdout,stderr=module.run_command(sshkeygen_command,
check_rc=False)
if stdout=='' and stderr=='' and (rc==0 or rc==1):
- return False, False #host not found, no other errors
+ return False, False, None, key #host not found, no other errors
if rc!=0: #something went wrong
module.fail_json(msg="ssh-keygen failed (rc=%d,stdout='%s',stderr='%s')" % (rc,stdout,stderr))
-#If user supplied no key, we don't want to try and replace anything with it
+ #If user supplied no key, we don't want to try and replace anything with it
if key is None:
- return True, False
+ return True, False, None, key
lines=stdout.split('\n')
- k=key.strip() #trim trailing newline
- #ssh-keygen returns only the host we ask about in the host field,
- #even if the key entry has multiple hosts. Emulate this behaviour here,
- #otherwise we get false negatives.
- #Only necessary for unhashed entries.
- if k[0] !='|':
- k=k.split()
- #The optional "marker" field, used for @cert-authority or @revoked
- if k[0][0] == '@':
- k[1]=host
- else:
- k[0]=host
- k=' '.join(k)
- for l in lines:
- if l=='':
- continue
- if l[0]=='#': #comment
+ new_key = normalize_known_hosts_key(key)
+
+ sshkeygen_command.insert(1,'-H')
+ rc,stdout,stderr=module.run_command(sshkeygen_command,check_rc=False)
+ if rc!=0: #something went wrong
+ module.fail_json(msg="ssh-keygen failed to hash host (rc=%d,stdout='%s',stderr='%s')" % (rc,stdout,stderr))
+ hashed_lines=stdout.split('\n')
+
+ for lnum,l in enumerate(lines):
+ if l=='':
continue
- if k==l: #found a match
- return True, False #current, not-replace
- #No match found, return current and replace
- return True, True
+ elif l[0]=='#': # info output from ssh-keygen; contains the line number where key was found
+ try:
+ # This output format has been hardcoded in ssh-keygen since at least OpenSSH 4.0
+ # It always outputs the non-localized comment before the found key
+ found_line = int(re.search(r'found: line (\d+)', l).group(1))
+ except (IndexError, AttributeError):
+ module.fail_json(msg="failed to parse output of ssh-keygen for line number: '%s'" % l)
+ else:
+ found_key = normalize_known_hosts_key(l)
+ if hash_host:
+ if found_key['host'][:3]=='|1|':
+ new_key['host']=found_key['host']
+ else:
+ hashed_host=normalize_known_hosts_key(hashed_lines[lnum])
+ found_key['host']=hashed_host['host']
+ key=key.replace(host,found_key['host'])
+ if new_key==found_key: #found a match
+ return True, False, found_line, key #found exactly the same key, don't replace
+ elif new_key['type'] == found_key['type']: # found a different key for the same key type
+ return True, True, found_line, key
+ #No match found, return found and replace_or_add, but no line
+ return True, True, None, key
+
+def normalize_known_hosts_key(key):
+ '''
+ Transform a key, either taken from a known_host file or provided by the
+ user, into a normalized form.
+ The host part (which might include multiple hostnames or be hashed) gets
+ replaced by the provided host. Also, any spurious information gets removed
+ from the end (like the username@host tag usually present in hostkeys, but
+ absent in known_hosts files)
+ '''
+ k=key.strip() #trim trailing newline
+ k=k.split()
+ d = dict()
+ #The optional "marker" field, used for @cert-authority or @revoked
+ if k[0][0] == '@':
+ d['options'] = k[0]
+ d['host']=k[1]
+ d['type']=k[2]
+ d['key']=k[3]
+ else:
+ d['host']=k[0]
+ d['type']=k[1]
+ d['key']=k[2]
+ return d
def main():
@@ -240,7 +299,8 @@ def main():
argument_spec = dict(
name = dict(required=True, type='str', aliases=['host']),
key = dict(required=False, type='str'),
- path = dict(default="~/.ssh/known_hosts", type='str'),
+ path = dict(default="~/.ssh/known_hosts", type='path'),
+ hash_host = dict(required=False, type='bool' ,default=False),
state = dict(default='present', choices=['absent','present']),
),
supports_check_mode = True
@@ -249,6 +309,5 @@ def main():
results = enforce_state(module,module.params)
module.exit_json(**results)
-# import module snippets
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
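The normalize_known_hosts_key() helper added above is what makes the hashed and unhashed comparisons possible: every line is reduced to a small dict before being compared. A minimal standalone sketch of the same parsing idea (the host name and key material below are made up for illustration):

def normalize_hostkey_line(line):
    # Split a known_hosts line into fields; an optional leading "@marker"
    # (@cert-authority or @revoked) shifts host/type/key one field right.
    fields = line.strip().split()
    if fields[0].startswith('@'):
        return {'options': fields[0], 'host': fields[1],
                'type': fields[2], 'key': fields[3]}
    # Any trailing comment (e.g. user@host) is simply dropped
    return {'host': fields[0], 'type': fields[1], 'key': fields[2]}

print(normalize_hostkey_line('example.com ssh-rsa AAAAB3NzaC1yc2E= root@example.com'))
# {'host': 'example.com', 'type': 'ssh-rsa', 'key': 'AAAAB3NzaC1yc2E='}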
diff --git a/system/locale_gen.py b/system/locale_gen.py
index e17ed5581da..b56a5e498e2 100644
--- a/system/locale_gen.py
+++ b/system/locale_gen.py
@@ -15,10 +15,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-import os
-import os.path
-from subprocess import Popen, PIPE, call
-import re
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -45,9 +45,19 @@
EXAMPLES = '''
# Ensure a locale exists.
-- locale_gen: name=de_CH.UTF-8 state=present
+- locale_gen:
+ name: de_CH.UTF-8
+ state: present
'''
+import os
+import os.path
+from subprocess import Popen, PIPE, call
+import re
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
LOCALE_NORMALIZATION = {
".utf8": ".UTF-8",
".eucjp": ".EUC-JP",
@@ -94,7 +104,7 @@ def is_present(name):
def fix_case(name):
"""locale -a might return the encoding in either lower or upper case.
Passing through this function makes them uniform for comparisons."""
- for s, r in LOCALE_NORMALIZATION.iteritems():
+ for s, r in LOCALE_NORMALIZATION.items():
name = name.replace(s, r)
return name
@@ -225,12 +235,12 @@ def main():
apply_change(state, name)
else:
apply_change_ubuntu(state, name)
- except EnvironmentError, e:
+ except EnvironmentError:
+ e = get_exception()
module.fail_json(msg=e.strerror, exitValue=e.errno)
module.exit_json(name=name, changed=changed, msg="OK")
-# import module snippets
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
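The recurring `except X: e = get_exception()` rewrite in this patch replaces the Python-2-only `except X, e:` syntax with a form that parses on both Python 2 and 3. A minimal sketch of what the pycompat24 helper boils down to (assuming it wraps sys.exc_info(), which is how such shims are typically written):

import sys

def get_exception():
    # Return the exception currently being handled; valid syntax on
    # both Python 2 and Python 3, unlike "except IOError, e:".
    return sys.exc_info()[1]

try:
    open('/nonexistent/path')
except IOError:
    e = get_exception()
    print("Failed: %s" % str(e))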
diff --git a/system/lvg.py b/system/lvg.py
index 9e3ba2d2931..9c638f4d317 100644
--- a/system/lvg.py
+++ b/system/lvg.py
@@ -19,6 +19,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
author: "Alexander Bulimov (@abulimov)"
@@ -35,6 +39,7 @@
pvs:
description:
- List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing volume group.
+ - The module will take care of running pvcreate if needed.
required: false
pesize:
description:
@@ -65,17 +70,24 @@
EXAMPLES = '''
# Create a volume group on top of /dev/sda1 with physical extent size = 32MB.
-- lvg: vg=vg.services pvs=/dev/sda1 pesize=32
+- lvg:
+ vg: vg.services
+ pvs: /dev/sda1
+ pesize: 32
# Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
# If, for example, we already have VG vg.services on top of /dev/sdb1,
# this VG will be extended by /dev/sdc5. Or if vg.services was created on
# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
# and then reduce by /dev/sda5.
-- lvg: vg=vg.services pvs=/dev/sdb1,/dev/sdc5
+- lvg:
+ vg: vg.services
+ pvs: /dev/sdb1,/dev/sdc5
# Remove a volume group with name vg.services.
-- lvg: vg=vg.services state=absent
+- lvg:
+ vg: vg.services
+ state: absent
'''
def parse_vgs(data):
@@ -130,6 +142,7 @@ def main():
pesize = module.params['pesize']
vgoptions = module.params['vg_options'].split()
+ dev_list = []
if module.params['pvs']:
dev_list = module.params['pvs']
elif state == 'present':
@@ -183,7 +196,7 @@ def main():
### create PV
pvcreate_cmd = module.get_bin_path('pvcreate', True)
for current_dev in dev_list:
- rc,_,err = module.run_command("%s %s" % (pvcreate_cmd,current_dev))
+ rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd,current_dev))
if rc == 0:
changed = True
else:
@@ -224,7 +237,7 @@ def main():
### create PV
pvcreate_cmd = module.get_bin_path('pvcreate', True)
for current_dev in devs_to_add:
- rc,_,err = module.run_command("%s %s" % (pvcreate_cmd, current_dev))
+ rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd, current_dev))
if rc == 0:
changed = True
else:
@@ -251,4 +264,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
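Both hunks above add -f to the pvcreate invocation so that initializing a device which still carries an old signature does not stop at an interactive confirmation prompt. A standalone sketch of that loop using plain subprocess (device names are hypothetical):

import subprocess

def pvcreate_force(devices, pvcreate_cmd='pvcreate'):
    """Initialize each device as an LVM physical volume; -f forces
    pvcreate past its confirmation prompt, mirroring the change above."""
    for dev in devices:
        # check_call raises CalledProcessError on a non-zero exit code
        subprocess.check_call([pvcreate_cmd, '-f', dev])

# Illustrative call:
# pvcreate_force(['/dev/sdb1', '/dev/sdc5'])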
diff --git a/system/lvol.py b/system/lvol.py
index 7a01d83829c..3ab60cb40ac 100644
--- a/system/lvol.py
+++ b/system/lvol.py
@@ -18,11 +18,15 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
author:
- - "Jeroen Hoekx (@jhoekx)"
- - "Alexander Bulimov (@abulimov)"
+ - "Jeroen Hoekx (@jhoekx)"
+ - "Alexander Bulimov (@abulimov)"
module: lvol
short_description: Configure LVM logical volumes
description:
@@ -42,12 +46,21 @@
- The size of the logical volume, according to lvcreate(8) --size, by
default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
- resizing is not supported with percentages.
+ Float values must begin with a digit.
+ Resizing using percentage values was not supported prior to 2.1.
state:
choices: [ "present", "absent" ]
default: present
description:
- - Control if the logical volume exists.
+ - Control if the logical volume exists. If C(present) and the
+ volume does not already exist then the C(size) option is required.
+ required: false
+ active:
+ version_added: "2.2"
+ choices: [ "yes", "no" ]
+ default: "yes"
+ description:
+ - Whether the volume is active and visible to the host.
required: false
force:
version_added: "1.5"
@@ -61,36 +74,136 @@
version_added: "2.0"
description:
- Free-form options to be passed to the lvcreate command
+ snapshot:
+ version_added: "2.1"
+ description:
+ - The name of the snapshot volume
+ required: false
+ pvs:
+ version_added: "2.2"
+ description:
+ - Comma-separated list of physical volumes, e.g. /dev/sda,/dev/sdb
+ required: false
+ shrink:
+ version_added: "2.2"
+ description:
+ - Shrink if current size is higher than the requested size.
+ required: false
+ default: yes
notes:
- Filesystems on top of the volume are not resized.
'''
EXAMPLES = '''
# Create a logical volume of 512m.
-- lvol: vg=firefly lv=test size=512
+- lvol:
+ vg: firefly
+ lv: test
+ size: 512
+
+# Create a logical volume of 512m with disks /dev/sda and /dev/sdb
+- lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ pvs: /dev/sda,/dev/sdb
+
+# Create cache pool logical volume
+- lvol:
+ vg: firefly
+ lv: lvcache
+ size: 512m
+ opts: --type cache-pool
# Create a logical volume of 512g.
-- lvol: vg=firefly lv=test size=512g
+- lvol:
+ vg: firefly
+ lv: test
+ size: 512g
# Create a logical volume the size of all remaining space in the volume group
-- lvol: vg=firefly lv=test size=100%FREE
+- lvol:
+ vg: firefly
+ lv: test
+ size: 100%FREE
# Create a logical volume with special options
-- lvol: vg=firefly lv=test size=512g opts="-r 16"
+- lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+ opts: -r 16
# Extend the logical volume to 1024m.
-- lvol: vg=firefly lv=test size=1024
+- lvol:
+ vg: firefly
+ lv: test
+ size: 1024
+
+# Extend the logical volume to consume all remaining space in the volume group
+- lvol:
+ vg: firefly
+ lv: test
+ size: +100%FREE
+
+# Extend the logical volume to take all remaining space of the PVs
+- lvol:
+ vg: firefly
+ lv: test
+ size: 100%PVS
+
+# Resize the logical volume to 80% of VG
+- lvol:
+ vg: firefly
+ lv: test
+ size: 80%VG
+ force: yes
# Reduce the logical volume to 512m
-- lvol: vg=firefly lv=test size=512 force=yes
+- lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ force: yes
+
+# Set the logical volume to 512m and do not try to shrink if size is lower than current one
+- lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ shrink: no
# Remove the logical volume.
-- lvol: vg=firefly lv=test state=absent force=yes
+- lvol:
+ vg: firefly
+ lv: test
+ state: absent
+ force: yes
+
+# Create a snapshot volume of the test logical volume.
+- lvol:
+ vg: firefly
+ lv: test
+ snapshot: snap1
+ size: 100m
+
+# Deactivate a logical volume
+- lvol:
+ vg: firefly
+ lv: test
+ active: false
+
+# Create a deactivated logical volume
+- lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+ active: false
'''
import re
-decimal_point = re.compile(r"(\.|,)")
+decimal_point = re.compile(r"(\d+)")
def mkversion(major, minor, patch):
return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch)
@@ -100,11 +213,24 @@ def parse_lvs(data):
for line in data.splitlines():
parts = line.strip().split(';')
lvs.append({
- 'name': parts[0],
- 'size': int(decimal_point.split(parts[1])[0]),
+ 'name': parts[0].replace('[','').replace(']',''),
+ 'size': int(decimal_point.match(parts[1]).group(1)),
+ 'active': (parts[2][4] == 'a')
})
return lvs
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'size': int(decimal_point.match(parts[1]).group(1)),
+ 'free': int(decimal_point.match(parts[2]).group(1)),
+ 'ext_size': int(decimal_point.match(parts[3]).group(1))
+ })
+ return vgs
+
def get_lvm_version(module):
ver_cmd = module.get_bin_path("lvm", required=True)
@@ -122,10 +248,14 @@ def main():
argument_spec=dict(
vg=dict(required=True),
lv=dict(required=True),
- size=dict(),
+ size=dict(type='str'),
opts=dict(type='str'),
state=dict(choices=["absent", "present"], default='present'),
force=dict(type='bool', default='no'),
+ shrink=dict(type='bool', default='yes'),
+ active=dict(type='bool', default='yes'),
+ snapshot=dict(type='str', default=None),
+ pvs=dict(type='str')
),
supports_check_mode=True,
)
@@ -146,12 +276,27 @@ def main():
opts = module.params['opts']
state = module.params['state']
force = module.boolean(module.params['force'])
+ shrink = module.boolean(module.params['shrink'])
+ active = module.boolean(module.params['active'])
size_opt = 'L'
size_unit = 'm'
+ snapshot = module.params['snapshot']
+ pvs = module.params['pvs']
+
+ if pvs is None:
+ pvs = ""
+ else:
+ pvs = pvs.replace(",", " ")
if opts is None:
opts = ""
+ # Add --test option when running in check-mode
+ if module.check_mode:
+ test_opt = ' --test'
+ else:
+ test_opt = ''
+
if size:
# LVCREATE(8) -l --extents option with percentage
if '%' in size:
@@ -167,35 +312,46 @@ def main():
size_opt = 'l'
size_unit = ''
+ if '%' not in size:
# LVCREATE(8) -L --size option unit
- elif size[-1].isalpha():
if size[-1].lower() in 'bskmgtpe':
- size_unit = size[-1].lower()
- if size[0:-1].isdigit():
- size = int(size[0:-1])
- else:
- module.fail_json(msg="Bad size specification for unit %s" % size_unit)
- size_opt = 'L'
- else:
- module.fail_json(msg="Size unit should be one of [bBsSkKmMgGtTpPeE]")
- # when no unit, megabytes by default
- elif size.isdigit():
- size = int(size)
- else:
- module.fail_json(msg="Bad size specification")
+ size_unit = size[-1].lower()
+ size = size[0:-1]
+
+ try:
+ float(size)
+ if not size[0].isdigit(): raise ValueError()
+ except ValueError:
+ module.fail_json(msg="Bad size specification of '%s'" % size)
+ # when no unit, megabytes by default
if size_opt == 'l':
unit = 'm'
else:
unit = size_unit
+ # Get information on volume group requested
+ vgs_cmd = module.get_bin_path("vgs", required=True)
+ rc, current_vgs, err = module.run_command(
+ "%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
+
+ vgs = parse_vgs(current_vgs)
+ this_vg = vgs[0]
+
+ # Get information on logical volume requested
lvs_cmd = module.get_bin_path("lvs", required=True)
rc, current_lvs, err = module.run_command(
- "%s --noheadings --nosuffix -o lv_name,size --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
+ "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
if rc != 0:
if state == 'absent':
- module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False)
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
else:
module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
@@ -203,8 +359,12 @@ def main():
lvs = parse_lvs(current_lvs)
+ if snapshot is None:
+ check_lv = lv
+ else:
+ check_lv = snapshot
for test_lv in lvs:
- if test_lv['name'] == lv:
+ if test_lv['name'] == check_lv:
this_lv = test_lv
break
else:
@@ -213,61 +373,118 @@ def main():
if state == 'present' and not size:
if this_lv is None:
module.fail_json(msg="No size given.")
- else:
- module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
msg = ''
if this_lv is None:
if state == 'present':
### create LV
- if module.check_mode:
+ lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
+ if snapshot is not None:
+ cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
+ else:
+ cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
+ rc, _, err = module.run_command(cmd)
+ if rc == 0:
changed = True
else:
- lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
- cmd = "%s %s -n %s -%s %s%s %s %s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg)
- rc, _, err = module.run_command(cmd)
- if rc == 0:
- changed = True
- else:
- module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
+ module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
else:
if state == 'absent':
### remove LV
- if module.check_mode:
- module.exit_json(changed=True)
if not force:
module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
lvremove_cmd = module.get_bin_path("lvremove", required=True)
- rc, _, err = module.run_command("%s --force %s/%s" % (lvremove_cmd, vg, this_lv['name']))
+ rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
if rc == 0:
module.exit_json(changed=True)
else:
module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)
+ elif not size:
+ pass
+
elif size_opt == 'l':
- module.exit_json(changed=False, msg="Resizing extents with percentage not supported.")
+ ### Resize LV based on % value
+ tool = None
+ size_free = this_vg['free']
+ if size_whole == 'VG' or size_whole == 'PVS':
+ size_requested = size_percent * this_vg['size'] / 100
+ else: # size_whole == 'FREE':
+ size_requested = size_percent * this_vg['free'] / 100
+ if '+' in size:
+ size_requested += this_lv['size']
+ if this_lv['size'] < size_requested:
+ if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
+ tool = module.get_bin_path("lvextend", required=True)
+ else:
+ module.fail_json(msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" % (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit))
+ elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
+ if size_requested == 0:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
+ elif not force:
+ module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
+
+ if tool:
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
+ changed = True
+ msg="Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
+ else:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
else:
- ### resize LV
+ ### resize LV based on absolute values
tool = None
- if size > this_lv['size']:
+ if int(size) > this_lv['size']:
tool = module.get_bin_path("lvextend", required=True)
- elif size < this_lv['size']:
+ elif shrink and int(size) < this_lv['size']:
+ if int(size) == 0:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
if not force:
module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
- tool = module.get_bin_path("lvreduce", required=True)
- tool = '%s %s' % (tool, '--force')
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
if tool:
- if module.check_mode:
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
changed = True
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
else:
- rc, _, err = module.run_command("%s -%s %s%s %s/%s" % (tool, size_opt, size, size_unit, vg, this_lv['name']))
- if rc == 0:
- changed = True
- elif "matches existing size" in err:
- module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
- else:
- module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
+ if this_lv is not None:
+ if active:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
+ else:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)
module.exit_json(changed=changed, msg=msg)
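The new parse_vgs() above turns the `vgs --noheadings --separator ';'` report into integers by matching only the leading digits of each size field (the reworked decimal_point regex). A self-contained sketch with an illustrative report line:

import re

decimal_point = re.compile(r"(\d+)")  # leading integer part of a size field

def parse_vgs(data):
    vgs = []
    for line in data.splitlines():
        parts = line.strip().split(';')
        vgs.append({
            'name': parts[0],
            'size': int(decimal_point.match(parts[1]).group(1)),
            'free': int(decimal_point.match(parts[2]).group(1)),
            'ext_size': int(decimal_point.match(parts[3]).group(1)),
        })
    return vgs

sample = "  firefly;10240.00m;2048.00m;4.00m"  # made-up vgs output
print(parse_vgs(sample))
# [{'name': 'firefly', 'size': 10240, 'free': 2048, 'ext_size': 4}]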
diff --git a/system/make.py b/system/make.py
new file mode 100644
index 00000000000..2b618db9fac
--- /dev/null
+++ b/system/make.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Linus Unnebäck
+#
+# This file is part of Ansible
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: make
+short_description: Run targets in a Makefile
+requirements: [ make ]
+version_added: "2.1"
+author: Linus Unnebäck (@LinusU)
+description:
+ - Run targets in a Makefile.
+options:
+ target:
+ description:
+ - The target to run
+ required: false
+ default: none
+ params:
+ description:
+ - Any extra parameters to pass to make
+ required: false
+ default: none
+ chdir:
+ description:
+ - cd into this directory before running make
+ required: true
+'''
+
+EXAMPLES = '''
+# Build the default target
+- make:
+ chdir: /home/ubuntu/cool-project
+
+# Run `install` target as root
+- make:
+ chdir: /home/ubuntu/cool-project
+ target: install
+ become: yes
+
+# Pass in extra arguments to build
+- make:
+ chdir: /home/ubuntu/cool-project
+ target: all
+ params:
+ NUM_THREADS: 4
+ BACKEND: lapack
+'''
+
+# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
+# fix this
+RETURN = '''# '''
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_command(command, module, check_rc=True):
+ """
+ Run a command using the module, return
+ the result code and std{err,out} content.
+
+ :param command: list of command arguments
+ :param module: Ansible make module instance
+ :return: return code, stdout content, stderr content
+ """
+ rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir'])
+ return rc, sanitize_output(out), sanitize_output(err)
+
+
+def sanitize_output(output):
+ """
+ Sanitize the output string before we
+ pass it to module.fail_json. Defaults
+ the string to empty if it is None, else
+ strips trailing newlines.
+
+ :param output: output to sanitize
+ :return: sanitized output
+ """
+ if output is None:
+ return ''
+ else:
+ return output.rstrip("\r\n")
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ target=dict(required=False, default=None, type='str'),
+ params=dict(required=False, default=None, type='dict'),
+ chdir=dict(required=True, default=None, type='path'),
+ ),
+ )
+ # Build up the invocation of `make` we are going to use
+ make_path = module.get_bin_path('make', True)
+ make_target = module.params['target']
+ if module.params['params'] is not None:
+ make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])]
+ else:
+ make_parameters = []
+
+ base_command = [make_path, make_target]
+ base_command.extend(make_parameters)
+
+ # Check if the target is already up to date
+ rc, out, err = run_command(base_command + ['--question'], module, check_rc=False)
+ if module.check_mode:
+ # If we've been asked to do a dry run, we only need
+ # to report whether or not the target is up to date
+ changed = (rc != 0)
+ else:
+ if rc == 0:
+ # The target is up to date, so we don't have to
+ # do anything
+ changed = False
+ else:
+ # The target isn't up to date, so we need to run it
+ rc, out, err = run_command(base_command, module)
+ changed = True
+
+ # We don't report the return code, as if this module failed
+ # we would be calling fail_json from run_command, so even if
+ # we had a non-zero return code, we did not fail. However, if
+ # we report a non-zero return code here, we will be marked as
+ # failed regardless of what we signal using the failed= kwarg.
+ module.exit_json(
+ changed=changed,
+ failed=False,
+ stdout=out,
+ stderr=err,
+ target=module.params['target'],
+ params=module.params['params'],
+ chdir=module.params['chdir']
+ )
+
+
+if __name__ == '__main__':
+ main()
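The module gets idempotence and check mode almost for free from `make --question`, which exits 0 when the target is already up to date and non-zero when work remains. A standalone sketch of that probe (paths and target are illustrative):

import subprocess

def make_needs_run(target, chdir):
    # `make <target> --question` does no work; its exit code alone
    # tells us whether a real run would change anything.
    rc = subprocess.call(['make', target, '--question'], cwd=chdir)
    return rc != 0

# Illustrative usage:
# if make_needs_run('all', '/home/ubuntu/cool-project'):
#     subprocess.check_call(['make', 'all'], cwd='/home/ubuntu/cool-project')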
diff --git a/system/modprobe.py b/system/modprobe.py
index 64e36c784a7..d84f0d3377d 100644
--- a/system/modprobe.py
+++ b/system/modprobe.py
@@ -19,6 +19,10 @@
# along with this software. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: modprobe
@@ -52,11 +56,22 @@
EXAMPLES = '''
# Add the 802.1q module
-- modprobe: name=8021q state=present
+- modprobe:
+ name: 8021q
+ state: present
+
# Add the dummy module
-- modprobe: name=dummy state=present params="numdummies=2"
+- modprobe:
+ name: dummy
+ state: present
+ params: 'numdummies=2'
'''
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+import shlex
+
+
def main():
module = AnsibleModule(
argument_spec={
@@ -84,7 +99,8 @@ def main():
present = True
break
modules.close()
- except IOError, e:
+ except IOError:
+ e = get_exception()
module.fail_json(msg=str(e), **args)
# Check only; don't modify
@@ -100,19 +116,20 @@ def main():
# Add/remove module as needed
if args['state'] == 'present':
if not present:
- rc, _, err = module.run_command([module.get_bin_path('modprobe', True), args['name'], args['params']])
+ command = [module.get_bin_path('modprobe', True), args['name']]
+ command.extend(shlex.split(args['params']))
+ rc, _, err = module.run_command(command)
if rc != 0:
module.fail_json(msg=err, **args)
args['changed'] = True
elif args['state'] == 'absent':
if present:
- rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']])
+ rc, _, err = module.run_command([module.get_bin_path('modprobe', True), '-r', args['name']])
if rc != 0:
module.fail_json(msg=err, **args)
args['changed'] = True
module.exit_json(**args)
-# import module snippets
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
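Splitting params with shlex.split() means several module parameters, including quoted ones, reach modprobe as separate argv entries instead of a single opaque string. A short sketch of the command assembly (the modprobe path is illustrative):

import shlex

def build_modprobe_command(modprobe_path, name, params):
    # shlex.split honours shell-style quoting, so
    # 'numdummies=2 mtu="1500"' yields two separate arguments
    command = [modprobe_path, name]
    command.extend(shlex.split(params))
    return command

print(build_modprobe_command('/sbin/modprobe', 'dummy', 'numdummies=2 mtu="1500"'))
# ['/sbin/modprobe', 'dummy', 'numdummies=2', 'mtu=1500']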
diff --git a/system/ohai.py b/system/ohai.py
index 6f066ec5ad8..47926a34d12 100644
--- a/system/ohai.py
+++ b/system/ohai.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ohai
@@ -53,6 +57,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
-
-
+if __name__ == '__main__':
+ main()
diff --git a/system/open_iscsi.py b/system/open_iscsi.py
index 084303d7b52..2e3c0e838f8 100644
--- a/system/open_iscsi.py
+++ b/system/open_iscsi.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: open_iscsi
@@ -84,23 +88,32 @@
description:
- whether the list of nodes in the persistent iscsi database should be
returned by the module
+'''
-examples:
- - description: perform a discovery on 10.1.2.3 and show available target
- nodes
- code: >
- open_iscsi: show_nodes=yes discover=yes portal=10.1.2.3
- - description: discover targets on portal and login to the one available
- (only works if exactly one target is exported to the initiator)
- code: >
- open_iscsi: portal={{iscsi_target}} login=yes discover=yes
- - description: connect to the named target, after updating the local
- persistent database (cache)
- code: >
- open_iscsi: login=yes target=iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
- - description: discconnect from the cached named target
- code: >
- open_iscsi: login=no target=iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d"
+EXAMPLES = '''
+# perform a discovery on 10.1.2.3 and show available target nodes
+- open_iscsi:
+ show_nodes: yes
+ discover: yes
+ portal: 10.1.2.3
+
+# discover targets on portal and login to the one available
+# (only works if exactly one target is exported to the initiator)
+- open_iscsi:
+ portal: '{{ iscsi_target }}'
+ login: yes
+ discover: yes
+
+# connect to the named target, after updating the local
+# persistent database (cache)
+- open_iscsi:
+ login: yes
+ target: 'iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d'
+
+# disconnect from the cached named target
+- open_iscsi:
+ login: no
+ target: 'iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d'
'''
import glob
@@ -372,5 +385,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
-
+if __name__ == '__main__':
+ main()
diff --git a/system/openwrt_init.py b/system/openwrt_init.py
new file mode 100644
index 00000000000..7b4f7f79d37
--- /dev/null
+++ b/system/openwrt_init.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2016, Andrew Gaffney
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: openwrt_init
+author:
+ - "Andrew Gaffney (@agaffney)"
+version_added: "2.3"
+short_description: Manage services on OpenWrt.
+description:
+ - Controls OpenWrt services on remote hosts.
+options:
+ name:
+ required: true
+ description:
+ - Name of the service.
+ aliases: ['service']
+ state:
+ required: false
+ default: null
+ choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ C(restarted) will always bounce the service. C(reloaded) will always reload.
+ enabled:
+ required: false
+ choices: [ "yes", "no" ]
+ default: null
+ description:
+ - Whether the service should start on boot. B(At least one of state and enabled is required.)
+ pattern:
+ required: false
+ description:
+ - If the service does not respond to the 'running' command, name a
+ substring to look for as would be found in the output of the I(ps)
+ command as a stand-in for a 'running' result. If the string is found,
+ the service will be assumed to be running.
+notes:
+ - One option other than name is required.
+requirements:
+ - An OpenWrt system
+'''
+
+EXAMPLES = '''
+# Example action to start service httpd, if not running
+- openwrt_init:
+ state: started
+ name: httpd
+
+# Example action to stop service cron, if running
+- openwrt_init:
+ name: cron
+ state: stopped
+
+# Example action to reload service httpd, in all cases
+- openwrt_init:
+ name: httpd
+ state: reloaded
+
+# Example action to enable service httpd
+- openwrt_init:
+ name: httpd
+ enabled: yes
+'''
+
+RETURN = '''
+'''
+
+import os
+import glob
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+module = None
+init_script = None
+
+# ===============================
+# Check if service is enabled
+def is_enabled():
+ (rc, out, err) = module.run_command("%s enabled" % init_script)
+ if rc == 0:
+ return True
+ return False
+
+# ===========================================
+# Main control flow
+
+def main():
+ global module, init_script
+ # init
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True, type='str', aliases=['service']),
+ state = dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
+ enabled = dict(type='bool'),
+ pattern = dict(required=False, default=None),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled']],
+ )
+
+ # initialize
+ service = module.params['name']
+ init_script = '/etc/init.d/' + service
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ }
+
+ # check if service exists
+ if not os.path.exists(init_script):
+ module.fail_json(msg='service %s does not exist' % service)
+
+ # Enable/disable service startup at boot if requested
+ if module.params['enabled'] is not None:
+ # do we need to enable the service?
+ enabled = is_enabled()
+
+ # default to current state
+ result['enabled'] = enabled
+
+ # Change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s" % (init_script, action))
+ # openwrt init scripts can return a non-zero exit code on a successful 'enable'
+ # command if the init script doesn't contain a STOP value, so we ignore the exit
+ # code and explicitly check if the service is now in the desired state
+ if is_enabled() != module.params['enabled']:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+ result['enabled'] = not enabled
+
+ if module.params['state'] is not None:
+ running = False
+
+ # check if service is currently running
+ if module.params['pattern']:
+ # Find ps binary
+ psbin = module.get_bin_path('ps', True)
+
+ # this should be busybox ps, so we only want/need the 'w' option
+ (rc, psout, pserr) = module.run_command('%s w' % psbin)
+ # If rc is 0, set running as appropriate
+ if rc == 0:
+ lines = psout.split("\n")
+ for line in lines:
+ if module.params['pattern'] in line and "pattern=" not in line:
+ # so as to not confuse ./hacking/test-module
+ running = True
+ break
+ else:
+ (rc, out, err) = module.run_command("%s running" % init_script)
+ if rc == 0:
+ running = True
+
+ # default to desired state
+ result['state'] = module.params['state']
+
+ # determine action, if any
+ action = None
+ if module.params['state'] == 'started':
+ if not running:
+ action = 'start'
+ result['changed'] = True
+ elif module.params['state'] == 'stopped':
+ if running:
+ action = 'stop'
+ result['changed'] = True
+ else:
+ action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
+ result['state'] = 'started'
+ result['changed'] = True
+
+ if action:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s" % (init_script, action))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+
+ module.exit_json(**result)
+
+if __name__ == '__main__':
+ main()
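Because an OpenWrt init script without a STOP value can exit non-zero even when 'enable' succeeded, the module deliberately ignores the exit code and re-reads the state afterwards. A minimal sketch of that verify-after-write pattern (the script path is illustrative):

import subprocess

def is_enabled(init_script):
    # "<script> enabled" exits 0 when the boot symlinks are in place
    return subprocess.call([init_script, 'enabled']) == 0

def set_enabled(init_script, want_enabled):
    action = 'enable' if want_enabled else 'disable'
    subprocess.call([init_script, action])  # exit code is unreliable here
    if is_enabled(init_script) != want_enabled:
        raise RuntimeError('Unable to %s service' % action)

# Illustrative usage:
# set_enabled('/etc/init.d/httpd', True)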
diff --git a/system/osx_defaults.py b/system/osx_defaults.py
index e4dc5f8c750..757cc811d92 100644
--- a/system/osx_defaults.py
+++ b/system/osx_defaults.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: osx_defaults
@@ -33,6 +37,13 @@
- The domain is a domain name of the form com.companyname.appname.
required: false
default: NSGlobalDomain
+ host:
+ description:
+ - The host on which the preference should apply. The special value "currentHost" corresponds to the
+ "-currentHost" switch of the defaults commandline tool.
+ required: false
+ default: null
+ version_added: "2.1"
key:
description:
- The key of the user preference
@@ -65,17 +76,48 @@
'''
EXAMPLES = '''
-- osx_defaults: domain=com.apple.Safari key=IncludeInternalDebugMenu type=bool value=true state=present
-- osx_defaults: domain=NSGlobalDomain key=AppleMeasurementUnits type=string value=Centimeters state=present
-- osx_defaults: key=AppleMeasurementUnits type=string value=Centimeters
+- osx_defaults:
+ domain: com.apple.Safari
+ key: IncludeInternalDebugMenu
+ type: bool
+ value: true
+ state: present
+
+- osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeters
+ state: present
+
+- osx_defaults:
+ domain: com.apple.screensaver
+ host: currentHost
+ key: showClock
+ type: int
+ value: 1
+
+- osx_defaults:
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeters
+
- osx_defaults:
key: AppleLanguages
type: array
- value: ["en", "nl"]
-- osx_defaults: domain=com.geekchimp.macable key=ExampleKeyToRemove state=absent
+ value:
+ - en
+ - nl
+
+- osx_defaults:
+ domain: com.geekchimp.macable
+ key: ExampleKeyToRemove
+ state: absent
'''
-from datetime import datetime
+import datetime
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
# exceptions --------------------------------------------------------------- {{{
class OSXDefaultsException(Exception):
@@ -124,14 +166,16 @@ def _convert_type(self, type, value):
if type == "string":
return str(value)
elif type in ["bool", "boolean"]:
- if value.lower() in [True, 1, "true", "1", "yes"]:
+ if isinstance(value, basestring):
+ value = value.lower()
+ if value in [True, 1, "true", "1", "yes"]:
return True
- elif value.lower() in [False, 0, "false", "0", "no"]:
+ elif value in [False, 0, "false", "0", "no"]:
return False
raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
elif type == "date":
try:
- return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
+ return datetime.datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
except ValueError:
raise OSXDefaultsException(
"Invalid date value: {0}. Required format yyy-mm-dd hh:mm:ss.".format(repr(value))
@@ -153,6 +197,19 @@ def _convert_type(self, type, value):
raise OSXDefaultsException('Type is not supported: {0}'.format(type))
+ """ Returns a normalized list of commandline arguments based on the "host" attribute """
+ def _host_args(self):
+ if self.host is None:
+ return []
+ elif self.host == 'currentHost':
+ return ['-currentHost']
+ else:
+ return ['-host', self.host]
+
+ """ Returns a list containing the "defaults" executable and any common base arguments """
+ def _base_command(self):
+ return [self.executable] + self._host_args()
+
""" Converts array output from defaults to an list """
@staticmethod
def _convert_defaults_str_to_list(value):
@@ -174,7 +231,7 @@ def _convert_defaults_str_to_list(value):
""" Reads value of this domain & key from defaults """
def read(self):
# First try to find out the type
- rc, out, err = self.module.run_command([self.executable, "read-type", self.domain, self.key])
+ rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])
# If RC is 1, the key does not exists
if rc == 1:
@@ -188,7 +245,7 @@ def read(self):
type = out.strip().replace('Type is ', '')
# Now get the current value
- rc, out, err = self.module.run_command([self.executable, "read", self.domain, self.key])
+ rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])
# Strip output
out = out.strip()
@@ -208,16 +265,16 @@ def read(self):
def write(self):
# We need to convert some values so the defaults commandline understands it
- if type(self.value) is bool:
+ if isinstance(self.value, bool):
if self.value:
value = "TRUE"
else:
value = "FALSE"
- elif type(self.value) is int or type(self.value) is float:
+ elif isinstance(self.value, (int, float)):
value = str(self.value)
elif self.array_add and self.current_value is not None:
value = list(set(self.value) - set(self.current_value))
- elif isinstance(self.value, datetime):
+ elif isinstance(self.value, datetime.datetime):
value = self.value.strftime('%Y-%m-%d %H:%M:%S')
else:
value = self.value
@@ -230,14 +287,14 @@ def write(self):
if not isinstance(value, list):
value = [value]
- rc, out, err = self.module.run_command([self.executable, 'write', self.domain, self.key, '-' + self.type] + value)
+ rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value)
if rc != 0:
raise OSXDefaultsException('An error occurred while writing value to defaults: ' + out)
""" Deletes defaults key from domain """
def delete(self):
- rc, out, err = self.module.run_command([self.executable, 'delete', self.domain, self.key])
+ rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
if rc != 0:
raise OSXDefaultsException("An error occurred while deleting key from defaults: " + out)
@@ -252,14 +309,16 @@ def run(self):
# Handle absent state
if self.state == "absent":
- print "Absent state detected!"
if self.current_value is None:
return False
+ if self.module.check_mode:
+ return True
self.delete()
return True
# There is a type mismatch! Given type does not match the type in defaults
- if self.current_value is not None and type(self.current_value) is not type(self.value):
+ value_type = type(self.value)
+ if self.current_value is not None and not isinstance(self.current_value, value_type):
raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__)
# Current value matches the given value. Nothing needs to be done. Arrays need extra care
@@ -272,6 +331,9 @@ def run(self):
elif self.current_value == self.value:
return False
+ if self.module.check_mode:
+ return True
+
# Change/Create/Set given key/value for domain in defaults
self.write()
return True
@@ -289,6 +351,10 @@ def main():
default="NSGlobalDomain",
required=False,
),
+ host=dict(
+ default=None,
+ required=False,
+ ),
key=dict(
default=None,
),
@@ -309,7 +375,7 @@ def main():
array_add=dict(
default=False,
required=False,
- choices=BOOLEANS,
+ type='bool',
),
value=dict(
default=None,
@@ -331,6 +397,7 @@ def main():
)
domain = module.params['domain']
+ host = module.params['host']
key = module.params['key']
type = module.params['type']
array_add = module.params['array_add']
@@ -339,14 +406,15 @@ def main():
path = module.params['path']
try:
- defaults = OSXDefaults(module=module, domain=domain, key=key, type=type,
+ defaults = OSXDefaults(module=module, domain=domain, host=host, key=key, type=type,
array_add=array_add, value=value, state=state, path=path)
changed = defaults.run()
module.exit_json(changed=changed)
- except OSXDefaultsException, e:
+ except OSXDefaultsException:
+ e = get_exception()
module.fail_json(msg=e.message)
# /main ------------------------------------------------------------------- }}}
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
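The new host option is funnelled through _host_args() so that every defaults invocation, read-type, read, write and delete alike, carries the right switch. A standalone sketch of that normalization:

def host_args(host):
    # None -> no switch, 'currentHost' -> -currentHost,
    # anything else -> -host <name>
    if host is None:
        return []
    elif host == 'currentHost':
        return ['-currentHost']
    return ['-host', host]

print(['defaults'] + host_args('currentHost') + ['read', 'com.apple.screensaver', 'showClock'])
# ['defaults', '-currentHost', 'read', 'com.apple.screensaver', 'showClock']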
diff --git a/system/pam_limits.py b/system/pam_limits.py
index 080b938dd01..f47fbf06bbf 100644
--- a/system/pam_limits.py
+++ b/system/pam_limits.py
@@ -23,10 +23,16 @@
import shutil
import re
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: pam_limits
version_added: "2.0"
+author:
+ - "Sebastien Rohaut (@usawa)"
short_description: Modify Linux PAM limits
description:
- The M(pam_limits) module modify PAM limits, default in /etc/security/limits.conf.
@@ -40,7 +46,7 @@
description:
- Limit type, see C(man limits) for an explanation
required: true
- choices: [ "hard", "soft" ]
+ choices: [ "hard", "soft", "-" ]
limit_item:
description:
- The limit to be set
@@ -78,14 +84,36 @@
- Modify the limits.conf path.
required: false
default: "/etc/security/limits.conf"
+ comment:
+ description:
+ - Comment associated with the limit.
+ required: false
+ default: ''
'''
EXAMPLES = '''
-# Add or modify limits for the user joe
-- pam_limits: domain=joe limit_type=soft limit_item=nofile value=64000
-
-# Add or modify limits for the user joe. Keep or set the maximal value
-- pam_limits: domain=joe limit_type=soft limit_item=nofile value=1000000
+# Add or modify nofile soft limit for the user joe
+- pam_limits:
+ domain: joe
+ limit_type: soft
+ limit_item: nofile
+ value: 64000
+
+# Add or modify fsize hard limit for the user smith. Keep or set the maximal value.
+- pam_limits:
+ domain: smith
+ limit_type: hard
+ limit_item: fsize
+ value: 1000000
+ use_max: yes
+
+# Add or modify memlock, both soft and hard, limit for the user james with a comment.
+- pam_limits:
+ domain: james
+ limit_type: -
+ limit_item: memlock
+ value: unlimited
+ comment: unlimited memory lock for james
'''
def main():
@@ -102,7 +130,7 @@ def main():
domain = dict(required=True, type='str'),
limit_type = dict(required=True, type='str', choices=pam_types),
limit_item = dict(required=True, type='str', choices=pam_items),
- value = dict(required=True, type='int'),
+ value = dict(required=True, type='str'),
use_max = dict(default=False, type='bool'),
use_min = dict(default=False, type='bool'),
backup = dict(default=False, type='bool'),
@@ -132,6 +160,9 @@ def main():
if use_max and use_min:
module.fail_json(msg="Cannot use use_min and use_max at the same time." )
+ if not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()):
+ module.fail_json(msg="Argument 'value' can be one of 'unlimited', 'infinity', '-1' or positive number. Refer to manual pages for more details.")
+
# Backup
if backup:
backup_file = module.backup_local(limits_conf)
@@ -141,7 +172,7 @@ def main():
message = ''
f = open (limits_conf, 'r')
# Tempfile
- nf = tempfile.NamedTemporaryFile(delete = False)
+ nf = tempfile.NamedTemporaryFile()
found = False
new_value = value
@@ -181,7 +212,10 @@ def main():
line_domain = line_fields[0]
line_type = line_fields[1]
line_item = line_fields[2]
- actual_value = int(line_fields[3])
+ actual_value = line_fields[3]
+
+ if not (actual_value in ['unlimited', 'infinity', '-1'] or actual_value.isdigit()):
+ module.fail_json(msg="Invalid configuration of '%s'. Current value of %s is unsupported." % (limits_conf, line_item))
# Found the line
if line_domain == domain and line_type == limit_type and line_item == limit_item:
@@ -191,16 +225,29 @@ def main():
nf.write(line)
continue
+ actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1']
+ value_unlimited = value in ['unlimited', 'infinity', '-1']
+
if use_max:
- new_value = max(value, actual_value)
+ if value.isdigit() and actual_value.isdigit():
+ new_value = str(max(int(value), int(actual_value)))
+ elif actual_value_unlimited:
+ new_value = actual_value
+ else:
+ new_value = value
if use_min:
- new_value = min(value,actual_value)
+ if value.isdigit() and actual_value.isdigit():
+ new_value = str(min(int(value), int(actual_value)))
+ elif value_unlimited:
+ new_value = actual_value
+ else:
+ new_value = value
# Change line only if value has changed
if new_value != actual_value:
changed = True
- new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + str(new_value) + new_comment + "\n"
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
message = new_limit
nf.write(new_limit)
else:
@@ -211,16 +258,21 @@ def main():
if not found:
changed = True
- new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + str(new_value) + new_comment + "\n"
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
message = new_limit
nf.write(new_limit)
f.close()
- nf.close()
+ nf.flush()
# Copy tempfile to newfile
module.atomic_move(nf.name, f.name)
+ try:
+ nf.close()
+ except:
+ pass
+
res_args = dict(
changed = changed, msg = message
)
@@ -233,4 +285,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
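Once value is a string, use_max and use_min can no longer lean on plain int() comparisons: 'unlimited', 'infinity' and '-1' all have to sort above any number. A self-contained sketch of the use_max rule introduced above:

def pick_max(value, actual_value):
    # Keep the larger of the requested and current limit, treating
    # 'unlimited'/'infinity'/'-1' as larger than any numeric value.
    if value.isdigit() and actual_value.isdigit():
        return str(max(int(value), int(actual_value)))
    elif actual_value in ('unlimited', 'infinity', '-1'):
        return actual_value
    return value

print(pick_max('64000', '1000000'))    # 1000000
print(pick_max('64000', 'unlimited'))  # unlimited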
diff --git a/system/puppet.py b/system/puppet.py
index 48a497c37ce..15acb97d262 100644
--- a/system/puppet.py
+++ b/system/puppet.py
@@ -15,11 +15,24 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see .
-import json
import os
import pipes
import stat
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: puppet
@@ -39,16 +52,10 @@
required: false
default: None
manifest:
- desciption:
+ description:
- Path to the manifest file to run puppet apply on.
required: false
default: None
- show_diff:
- description:
- - Should puppet return diffs of changes applied. Defaults to off to avoid leaking secret changes by default.
- required: false
- default: no
- choices: [ "yes", "no" ]
facts:
description:
- A dict of values to pass in as persistent external facter facts
@@ -64,6 +71,32 @@
- Puppet environment to be used.
required: false
default: None
+ logdest:
+ description:
+ - Where the puppet logs should go, if puppet apply is being used
+ required: false
+ default: stdout
+ choices: [ 'stdout', 'syslog' ]
+ version_added: "2.1"
+ certname:
+ description:
+ - The name to use when handling certificates.
+ required: false
+ default: None
+ version_added: "2.1"
+ tags:
+ description:
+ - A comma-separated list of puppet tags to be used.
+ required: false
+ default: None
+ version_added: "2.1"
+ execute:
+ description:
+ - Execute a specific piece of Puppet code. It has no effect with
+ a puppetmaster.
+ required: false
+ default: None
+ version_added: "2.1"
requirements: [ puppet ]
author: "Monty Taylor (@emonty)"
'''
@@ -73,10 +106,25 @@
- puppet
# Run puppet and timeout in 5 minutes
-- puppet: timeout=5m
+- puppet:
+ timeout: 5m
# Run puppet using a different environment
-- puppet: environment=testing
+- puppet:
+ environment: testing
+
+# Run puppet using a specific certname
+- puppet:
+ certname: agent01.example.com
+
+# Run puppet using a specific piece of Puppet code. Has no effect with a
+# puppetmaster.
+- puppet:
+ execute: 'include ::mymodule'
+
+# Run puppet using specific tags
+- puppet:
+ tags: update,nginx
'''
@@ -108,26 +156,37 @@ def main():
timeout=dict(default="30m"),
puppetmaster=dict(required=False, default=None),
manifest=dict(required=False, default=None),
+ logdest=dict(
+ required=False, default='stdout',
+ choices=['stdout', 'syslog']),
show_diff=dict(
+ # internal code to work with --diff, do not use
default=False, aliases=['show-diff'], type='bool'),
facts=dict(default=None),
facter_basename=dict(default='ansible'),
environment=dict(required=False, default=None),
+ certname=dict(required=False, default=None),
+ tags=dict(required=False, default=None, type='list'),
+ execute=dict(required=False, default=None),
),
supports_check_mode=True,
mutually_exclusive=[
('puppetmaster', 'manifest'),
+ ('puppetmaster', 'manifest', 'execute'),
],
)
p = module.params
global PUPPET_CMD
- PUPPET_CMD = module.get_bin_path("puppet", False)
+ PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])
if not PUPPET_CMD:
module.fail_json(
msg="Could not find puppet. Please ensure it is installed.")
+ global TIMEOUT_CMD
+ TIMEOUT_CMD = module.get_bin_path("timeout", False)
+
if p['manifest']:
if not os.path.exists(p['manifest']):
module.fail_json(
@@ -140,7 +199,8 @@ def main():
PUPPET_CMD + " config print agent_disabled_lockfile")
if os.path.exists(stdout.strip()):
module.fail_json(
- msg="Puppet agent is administratively disabled.", disabled=True)
+ msg="Puppet agent is administratively disabled.",
+ disabled=True)
elif rc != 0:
module.fail_json(
msg="Puppet agent state could not be determined.")
@@ -151,13 +211,18 @@ def main():
module.params['facter_basename'],
module.params['facts'])
- base_cmd = "timeout -s 9 %(timeout)s %(puppet_cmd)s" % dict(
- timeout=pipes.quote(p['timeout']), puppet_cmd=PUPPET_CMD)
+ if TIMEOUT_CMD:
+ base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
+ timeout_cmd=TIMEOUT_CMD,
+ timeout=pipes.quote(p['timeout']),
+ puppet_cmd=PUPPET_CMD)
+ else:
+ base_cmd = PUPPET_CMD
if not p['manifest']:
cmd = ("%(base_cmd)s agent --onetime"
" --ignorecache --no-daemonize --no-usecacheonfailure --no-splay"
- " --detailed-exitcodes --verbose") % dict(
+ " --detailed-exitcodes --verbose --color 0") % dict(
base_cmd=base_cmd,
)
if p['puppetmaster']:
@@ -166,14 +231,26 @@ def main():
cmd += " --show_diff"
if p['environment']:
cmd += " --environment '%s'" % p['environment']
+ if p['tags']:
+ cmd += " --tags '%s'" % ','.join(p['tags'])
+ if p['certname']:
+ cmd += " --certname='%s'" % p['certname']
if module.check_mode:
cmd += " --noop"
else:
cmd += " --no-noop"
else:
cmd = "%s apply --detailed-exitcodes " % base_cmd
+ if p['logdest'] == 'syslog':
+ cmd += "--logdest syslog "
if p['environment']:
cmd += "--environment '%s' " % p['environment']
+ if p['certname']:
+ cmd += "--certname='%s' " % p['certname']
+ if p['execute']:
+ cmd += "--execute '%s' " % p['execute']
+ if p['tags']:
+ cmd += "--tags '%s' " % ','.join(p['tags'])
if module.check_mode:
cmd += "--noop "
else:
@@ -183,7 +260,7 @@ def main():
if rc == 0:
# success
- module.exit_json(rc=rc, changed=False, stdout=stdout)
+ module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
elif rc == 1:
# rc==1 could be because it's disabled
# rc==1 could also mean there was a compilation failure
@@ -197,7 +274,7 @@ def main():
error=True, stdout=stdout, stderr=stderr)
elif rc == 2:
# success with changes
- module.exit_json(rc=0, changed=True)
+ module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
elif rc == 124:
# timeout
module.exit_json(
@@ -211,4 +288,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
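Looking up timeout(1) with get_bin_path and falling back to a bare puppet invocation keeps the module usable on platforms without coreutils' timeout. A sketch of the command assembly (binary paths are illustrative):

import pipes

def build_base_cmd(puppet_cmd, timeout, timeout_cmd=None):
    # Wrap puppet in `timeout -s 9 <duration>` only when the binary
    # was found; otherwise run puppet directly.
    if timeout_cmd:
        return "%s -s 9 %s %s" % (timeout_cmd, pipes.quote(timeout), puppet_cmd)
    return puppet_cmd

print(build_base_cmd('/opt/puppetlabs/bin/puppet', '30m', '/usr/bin/timeout'))
# /usr/bin/timeout -s 9 30m /opt/puppetlabs/bin/puppet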
diff --git a/system/sefcontext.py b/system/sefcontext.py
new file mode 100644
index 00000000000..f1000b34cc1
--- /dev/null
+++ b/system/sefcontext.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+
+# (c) 2016, Dag Wieers
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: sefcontext
+short_description: Manages SELinux file context mapping definitions
+description:
+ - Manages SELinux file context mapping definitions.
+ - Similar to the C(semanage fcontext) command.
+version_added: "2.2"
+options:
+ target:
+ description:
+ - Target path (expression).
+ required: true
+ default: null
+ aliases: ['path']
+ ftype:
+ description:
+ - File type.
+ required: false
+ default: a
+ setype:
+ description:
+ - SELinux type for the specified target.
+ required: true
+ default: null
+ seuser:
+ description:
+ - SELinux user for the specified target.
+ required: false
+ default: null
+ selevel:
+ description:
+ - SELinux range for the specified target.
+ required: false
+ default: null
+ aliases: ['serange']
+ state:
+ description:
+ - Whether the SELinux file context mapping should be present or absent.
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ required: false
+ default: yes
+notes:
+ - The changes are persistent across reboots
+requirements: [ 'libselinux-python', 'policycoreutils-python' ]
+author: Dag Wieers
+'''
+
+EXAMPLES = '''
+# Allow apache to modify files in /srv/git_repos
+- sefcontext:
+ target: '/srv/git_repos(/.*)?'
+ setype: httpd_git_rw_content_t
+ state: present
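+
+# Illustrative companion sketch: remove the mapping again (note that this
+# module requires setype even when state=absent)
+- sefcontext:
+ target: '/srv/git_repos(/.*)?'
+ setype: httpd_git_rw_content_t
+ state: absent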
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils._text import to_native
+
+try:
+ import selinux
+ HAVE_SELINUX=True
+except ImportError:
+ HAVE_SELINUX=False
+
+try:
+ import seobject
+ HAVE_SEOBJECT=True
+except ImportError:
+ HAVE_SEOBJECT=False
+
+### Add missing entries (backward compatible), guarded so that a missing
+### seobject fails cleanly via fail_json() in main() instead of at import
+if HAVE_SEOBJECT:
+ seobject.file_types.update(dict(
+ a = seobject.SEMANAGE_FCONTEXT_ALL,
+ b = seobject.SEMANAGE_FCONTEXT_BLOCK,
+ c = seobject.SEMANAGE_FCONTEXT_CHAR,
+ d = seobject.SEMANAGE_FCONTEXT_DIR,
+ f = seobject.SEMANAGE_FCONTEXT_REG,
+ l = seobject.SEMANAGE_FCONTEXT_LINK,
+ p = seobject.SEMANAGE_FCONTEXT_PIPE,
+ s = seobject.SEMANAGE_FCONTEXT_SOCK,
+ ))
+
+### Make backward compatible
+option_to_file_type_str = dict(
+ a = 'all files',
+ b = 'block device',
+ c = 'character device',
+ d = 'directory',
+ f = 'regular file',
+ l = 'symbolic link',
+ p = 'named pipe',
+ s = 'socket file',
+)
+
+def semanage_fcontext_exists(sefcontext, target, ftype):
+ ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. '''
+
+ # Note that records are keyed by a string representation of the file_type
+ record = (target, option_to_file_type_str[ftype])
+ records = sefcontext.get_all()
+ try:
+ return records[record]
+ except KeyError:
+ return None
+
+def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''):
+ ''' Add or modify a SELinux file context mapping definition in the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Modify existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if seuser is None:
+ seuser = orig_seuser
+ if serange is None:
+ serange = orig_serange
+
+ if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
+ if not module.check_mode:
+ sefcontext.modify(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Change to semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
+ else:
+ # Add missing entry
+ if seuser is None:
+ seuser = 'system_u'
+ if serange is None:
+ serange = 's0'
+
+ if not module.check_mode:
+ sefcontext.add(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Addition to semanage file context mappings\n'
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
+
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
+
+def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
+ ''' Delete SELinux file context mapping definition from the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Remove existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if not module.check_mode:
+ sefcontext.delete(target, ftype)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Deletion from semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3])
+
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, **result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ target = dict(required=True, aliases=['path']),
+ ftype = dict(required=False, choices=option_to_file_type_str.keys(), default='a'),
+ setype = dict(required=True),
+ seuser = dict(required=False, default=None),
+ selevel = dict(required=False, default=None, aliases=['serange']),
+ state = dict(required=False, choices=['present', 'absent'], default='present'),
+ reload = dict(required=False, type='bool', default='yes'),
+ ),
+ supports_check_mode = True,
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg="This module requires libselinux-python")
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg="This module requires policycoreutils-python")
+
+ if not selinux.is_selinux_enabled():
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ target = module.params['target']
+ ftype = module.params['ftype']
+ setype = module.params['setype']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = dict(target=target, ftype=ftype, setype=setype, state=state)
+
+ if state == 'present':
+ semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser)
+ elif state == 'absent':
+ semanage_fcontext_delete(module, result, target, ftype, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/system/selinux_permissive.py b/system/selinux_permissive.py
index 1e2a5c6c996..fed5db2bcf2 100644
--- a/system/selinux_permissive.py
+++ b/system/selinux_permissive.py
@@ -19,6 +19,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: selinux_permissive
@@ -56,7 +60,9 @@
'''
EXAMPLES = '''
-- selinux_permissive: name=httpd_t permissive=true
+- selinux_permissive:
+ name: httpd_t
+ permissive: true
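+
+# Illustrative companion sketch: put the domain back into enforcing mode
+- selinux_permissive:
+ name: httpd_t
+ permissive: false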
'''
HAVE_SEOBJECT = False
@@ -65,6 +71,8 @@
HAVE_SEOBJECT = True
except ImportError:
pass
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
def main():
@@ -90,7 +98,8 @@ def main():
try:
permissive_domains = seobject.permissiveRecords(store)
- except ValueError, e:
+ except ValueError:
+ e = get_exception()
module.fail_json(domain=domain, msg=str(e))
# not supported on EL 6
@@ -99,7 +108,8 @@ def main():
try:
all_domains = permissive_domains.get_all()
- except ValueError, e:
+ except ValueError:
+ e = get_exception()
module.fail_json(domain=domain, msg=str(e))
if permissive:
@@ -107,7 +117,8 @@ def main():
if not module.check_mode:
try:
permissive_domains.add(domain)
- except ValueError, e:
+ except ValueError:
+ e = get_exception()
module.fail_json(domain=domain, msg=str(e))
changed = True
else:
@@ -115,7 +126,8 @@ def main():
if not module.check_mode:
try:
permissive_domains.delete(domain)
- except ValueError, e:
+ except ValueError:
+ e = get_exception()
module.fail_json(domain=domain, msg=str(e))
changed = True
@@ -123,8 +135,5 @@ def main():
permissive=permissive, domain=domain)
-#################################################
-# import module snippets
-from ansible.module_utils.basic import *
-
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/seport.py b/system/seport.py
index fb1cef661a2..bbd049c030c 100644
--- a/system/seport.py
+++ b/system/seport.py
@@ -17,13 +17,17 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: seport
short_description: Manages SELinux network port type definitions
description:
- Manages SELinux network port type definitions.
-version_added: "1.7.1"
+version_added: "2.0"
options:
ports:
description:
@@ -61,11 +65,25 @@
EXAMPLES = '''
# Allow Apache to listen on tcp port 8888
-- seport: ports=8888 proto=tcp setype=http_port_t state=present
+- seport:
+ ports: 8888
+ proto: tcp
+ setype: http_port_t
+ state: present
+
# Allow sshd to listen on tcp port 8991
-- seport: ports=8991 proto=tcp setype=ssh_port_t state=present
+- seport:
+ ports: 8991
+ proto: tcp
+ setype: ssh_port_t
+ state: present
+
# Allow memcached to listen on tcp ports 10000-10100 and 10112
-- seport: ports=10000-10100,10112 proto=tcp setype=memcache_port_t state=present
+- seport:
+ ports: 10000-10100,10112
+ proto: tcp
+ setype: memcache_port_t
+ state: present
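+
+# Illustrative companion sketch: remove the http_port_t mapping again
+- seport:
+ ports: 8888
+ proto: tcp
+ setype: http_port_t
+ state: absent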
'''
try:
@@ -80,10 +98,33 @@
except ImportError:
HAVE_SEOBJECT=False
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+
+def semanage_port_get_ports(seport, setype, proto):
+ """ Get the list of ports that have the specified type definition.
+
+ :param seport: Instance of seobject.portRecords
+
+ :type setype: str
+ :param setype: SELinux type.
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :rtype: list
+ :return: List of ports that have the specified SELinux type.
+ """
+ records = seport.get_all_by_type()
+ if (setype, proto) in records:
+ return records[(setype, proto)]
+ else:
+ return []
+
-def semanage_port_exists(seport, port, proto):
- """ Get the SELinux port type definition from policy. Return None if it does
- not exist.
+def semanage_port_get_type(seport, port, proto):
+ """ Get the SELinux type of the specified port.
:param seport: Instance of seobject.portRecords
@@ -93,15 +134,19 @@ def semanage_port_exists(seport, port, proto):
:type proto: str
:param proto: Protocol ('tcp' or 'udp')
- :rtype: bool
- :return: True if the SELinux port type definition exists, False otherwise
+ :rtype: tuple
+ :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found.
"""
ports = port.split('-', 1)
if len(ports) == 1:
ports.extend(ports)
- ports = map(int, ports)
- record = (ports[0], ports[1], proto)
- return record in seport.get_all()
+ key = (int(ports[0]), int(ports[1]), proto)
+
+ records = seport.get_all()
+ if key in records:
+ return records[key]
+ else:
+ return None
def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''):
@@ -135,27 +180,36 @@ def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', ses
seport = seobject.portRecords(sestore)
seport.set_reload(do_reload)
change = False
+ ports_by_type = semanage_port_get_ports(seport, setype, proto)
for port in ports:
- exists = semanage_port_exists(seport, port, proto)
- if not exists and not module.check_mode:
- seport.add(port, proto, serange, setype)
- change = change or not exists
-
- except ValueError, e:
+ if port not in ports_by_type:
+ change = True
+ port_type = semanage_port_get_type(seport, port, proto)
+ if port_type is None and not module.check_mode:
+ seport.add(port, proto, serange, setype)
+ elif port_type is not None and not module.check_mode:
+ seport.modify(port, proto, serange, setype)
+
+ except ValueError:
+ e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
- except IOError, e:
+ except IOError:
+ e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
- except KeyError, e:
+ except KeyError:
+ e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
- except OSError, e:
+ except OSError:
+ e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
- except RuntimeError, e:
+ except RuntimeError:
+ e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
return change
-def semanage_port_del(module, ports, proto, do_reload, sestore=''):
+def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''):
""" Delete SELinux port type definition from the policy.
:type module: AnsibleModule
@@ -167,6 +221,9 @@ def semanage_port_del(module, ports, proto, do_reload, sestore=''):
:type proto: str
:param proto: Protocol ('tcp' or 'udp')
+ :type setype: str
+ :param setype: SELinux type.
+
:type do_reload: bool
:param do_reload: Whether to reload SELinux policy after commit
@@ -180,21 +237,27 @@ def semanage_port_del(module, ports, proto, do_reload, sestore=''):
seport = seobject.portRecords(sestore)
seport.set_reload(do_reload)
change = False
+ ports_by_type = semanage_port_get_ports(seport, setype, proto)
for port in ports:
- exists = semanage_port_exists(seport, port, proto)
- if not exists and not module.check_mode:
- seport.delete(port, proto)
- change = change or not exists
+ if port in ports_by_type:
+ change = True
+ if not module.check_mode:
+ seport.delete(port, proto)
- except ValueError, e:
+ except ValueError:
+ e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
- except IOError,e:
+ except IOError:
+ e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
- except KeyError, e:
+ except KeyError:
+ e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
- except OSError, e:
+ except OSError:
+ e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
- except RuntimeError, e:
+ except RuntimeError:
+ e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
return change
@@ -234,7 +297,7 @@ def main():
if not selinux.is_selinux_enabled():
module.fail_json(msg="SELinux is disabled on this host.")
- ports = [x.strip() for x in module.params['ports'].split(',')]
+ ports = [x.strip() for x in str(module.params['ports']).split(',')]
proto = module.params['proto']
setype = module.params['setype']
state = module.params['state']
@@ -250,12 +313,12 @@ def main():
if state == 'present':
result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload)
elif state == 'absent':
- result['changed'] = semanage_port_del(module, ports, proto, do_reload)
+ result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload)
else:
module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
module.exit_json(**result)
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/solaris_zone.py b/system/solaris_zone.py
index 8c8d22305bc..85e0f41a1ca 100644
--- a/system/solaris_zone.py
+++ b/system/solaris_zone.py
@@ -22,6 +22,10 @@
import platform
import tempfile
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: solaris_zone
@@ -104,31 +108,55 @@
EXAMPLES = '''
# Create and install a zone, but don't boot it
-solaris_zone: name=zone1 state=present path=/zones/zone1 sparse=true root_password="Be9oX7OSwWoU."
- config='set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+- solaris_zone:
+ name: zone1
+ state: present
+ path: /zones/zone1
+ sparse: true
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
# Create and install a zone and boot it
-solaris_zone: name=zone1 state=running path=/zones/zone1 root_password="Be9oX7OSwWoU."
- config='set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+- solaris_zone:
+ name: zone1
+ state: running
+ path: /zones/zone1
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
# Boot an already installed zone
-solaris_zone: name=zone1 state=running
+- solaris_zone:
+ name: zone1
+ state: running
# Stop a zone
-solaris_zone: name=zone1 state=stopped
+- solaris_zone:
+ name: zone1
+ state: stopped
# Destroy a zone
-solaris_zone: name=zone1 state=absent
+- solaris_zone:
+ name: zone1
+ state: absent
# Detach a zone
-solaris_zone: name=zone1 state=detached
+- solaris_zone:
+ name: zone1
+ state: detached
# Configure a zone, ready to be attached
-solaris_zone: name=zone1 state=configured path=/zones/zone1 root_password="Be9oX7OSwWoU."
- config='set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+- solaris_zone:
+ name: zone1
+ state: configured
+ path: /zones/zone1
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
# Attach a zone
-solaris_zone: name=zone1 state=attached attach_options='-u'
+- solaris_zone:
+ name: zone1
+ state: attached
+ attach_options: '-u'
'''
class Zone(object):
@@ -417,9 +445,9 @@ def main():
argument_spec = dict(
name = dict(required=True),
state = dict(default='present', choices=['running', 'started', 'present', 'installed', 'stopped', 'absent', 'configured', 'detached', 'attached']),
- path = dict(defalt=None),
+ path = dict(default=None),
sparse = dict(default=False, type='bool'),
- root_password = dict(default=None),
+ root_password = dict(default=None, no_log=True),
timeout = dict(default=600, type='int'),
config = dict(default=''),
create_options = dict(default=''),
@@ -453,4 +481,6 @@ def main():
module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/system/svc.py b/system/svc.py
old mode 100644
new mode 100755
index 9831ce42ea7..378d647bee9
--- a/system/svc.py
+++ b/system/svc.py
@@ -18,11 +18,15 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: svc
author: "Brian Coca (@bcoca)"
-version_added:
+version_added: "1.9"
short_description: Manage daemontools services.
description:
- Controls daemontools services on remote hosts using the svc utility.
@@ -38,7 +42,7 @@
- C(Started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
svc (svc -t) and C(killed) will always bounce the svc (svc -k).
- C(reloaded) will send a sigusr1 (svc -u).
+ C(reloaded) will send a sigusr1 (svc -1).
C(once) will run a normally downed svc once (svc -o), not really
an idempotent operation.
downed:
@@ -67,26 +71,41 @@
EXAMPLES = '''
# Example action to start svc dnscache, if not running
- - svc: name=dnscache state=started
+ - svc:
+ name: dnscache
+ state: started
# Example action to stop svc dnscache, if running
- - svc: name=dnscache state=stopped
+ - svc:
+ name: dnscache
+ state: stopped
# Example action to kill svc dnscache, in all cases
- - svc : name=dnscache state=killed
+ - svc:
+ name: dnscache
+ state: killed
# Example action to restart svc dnscache, in all cases
- - svc : name=dnscache state=restarted
+ - svc:
+ name: dnscache
+ state: restarted
# Example action to reload svc dnscache, in all cases
- - svc: name=dnscache state=reloaded
+ - svc:
+ name: dnscache
+ state: reloaded
# Example using alt svc directory location
- - svc: name=dnscache state=reloaded service_dir=/var/service
+ - svc:
+ name: dnscache
+ state: reloaded
+ service_dir: /var/service
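+
+ # Illustrative sketch using the boolean enabled option, which links the
+ # service into the service directory
+ - svc:
+ name: dnscache
+ enabled: yes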
'''
import platform
import shlex
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.basic import *
def _load_dist_subclass(cls, *args, **kwargs):
'''
@@ -152,7 +171,8 @@ def enable(self):
if os.path.exists(self.src_full):
try:
os.symlink(self.src_full, self.svc_full)
- except OSError, e:
+ except OSError:
+ e = get_exception()
self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e))
else:
self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
@@ -160,7 +180,8 @@ def enable(self):
def disable(self):
try:
os.unlink(self.svc_full)
- except OSError, e:
+ except OSError:
+ e = get_exception()
self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e))
self.execute_command([self.svc_cmd,'-dx',self.src_full])
@@ -221,7 +242,8 @@ def kill(self):
def execute_command(self, cmd):
try:
(rc, out, err) = self.module.run_command(' '.join(cmd))
- except Exception, e:
+ except Exception:
+ e = get_exception()
self.module.fail_json(msg="failed to execute: %s" % str(e))
return (rc, out, err)
@@ -240,8 +262,8 @@ def main():
argument_spec = dict(
name = dict(required=True),
state = dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']),
- enabled = dict(required=False, type='bool', choices=BOOLEANS),
- downed = dict(required=False, type='bool', choices=BOOLEANS),
+ enabled = dict(required=False, type='bool'),
+ downed = dict(required=False, type='bool'),
dist = dict(required=False, default='daemontools'),
service_dir = dict(required=False, default='/service'),
service_src = dict(required=False, default='/etc/service'),
@@ -249,6 +271,8 @@ def main():
supports_check_mode=True,
)
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
state = module.params['state']
enabled = module.params['enabled']
downed = module.params['downed']
@@ -265,7 +289,8 @@ def main():
svc.enable()
else:
svc.disable()
- except (OSError, IOError), e:
+ except (OSError, IOError):
+ e = get_exception()
module.fail_json(msg="Could change service link: %s" % str(e))
if state is not None and state != svc.state:
@@ -282,13 +307,14 @@ def main():
open(d_file, "a").close()
else:
os.unlink(d_file)
- except (OSError, IOError), e:
+ except (OSError, IOError):
+ e = get_exception()
module.fail_json(msg="Could change downed file: %s " % (str(e)))
module.exit_json(changed=changed, svc=svc.report())
-# this is magic, not normal python include
-from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/system/timezone.py b/system/timezone.py
new file mode 100644
index 00000000000..7d8d9aef76c
--- /dev/null
+++ b/system/timezone.py
@@ -0,0 +1,467 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Shinichi TAMURA (@tmshn)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+import os
+import re
+from ansible.module_utils.basic import AnsibleModule, get_platform
+from ansible.module_utils.six import iteritems
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: timezone
+short_description: Configure timezone setting
+description:
+ - This module configures the timezone setting, both of the system clock
+ and of the hardware clock. I(Currently only the Linux platform is supported.)
+ It is recommended to restart C(crond) after changing the timezone,
+ otherwise the jobs may run at the wrong time.
+ It uses the C(timedatectl) command if available. Otherwise, it edits
+ C(/etc/sysconfig/clock) or C(/etc/timezone) for the system clock,
+ and uses the C(hwclock) command for the hardware clock.
+ If you want to set up NTP, use the M(service) module.
+version_added: "2.2.0"
+options:
+ name:
+ description:
+ - Name of the timezone for the system clock.
+ Default is to keep current setting.
+ required: false
+ hwclock:
+ description:
+ - Whether the hardware clock is in UTC or in local timezone.
+ Default is to keep current setting.
+ Note that changing this option is not recommended, and it may fail
+ to apply, especially in virtual environments such as AWS.
+ required: false
+ aliases: ['rtc']
+author: "Shinichi TAMURA (@tmshn)"
+'''
+
+RETURN = '''
+diff:
+ description: The differences for the given arguments, before and after the change.
+ returned: success
+ type: dictionary
+ contains:
+ before:
+ description: The values before change
+ type: dict
+ after:
+ description: The values after change
+ type: dict
+'''
+
+EXAMPLES = '''
+- name: set timezone to Asia/Tokyo
+ timezone:
+ name: Asia/Tokyo
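+
+# A hedged companion sketch: also set the hardware clock to UTC (as the
+# option notes above say, changing hwclock may fail in virtual environments)
+- name: set timezone to Asia/Tokyo and hardware clock to UTC
+ timezone:
+ name: Asia/Tokyo
+ hwclock: UTC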
+'''
+
+
+class Timezone(object):
+ """This is a generic Timezone manipulation class that is subclassed based on platform.
+
+ A subclass may wish to override the following action methods:
+ - get(key, phase) ... get the value from the system at `phase`
+ - set(key, value) ... set the value to the current system
+ """
+
+ def __new__(cls, module):
+ """Return the platform-specific subclass.
+
+ It does not use load_platform_subclass() because it needs to judge based
+ on whether the `timedatectl` command exists.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ if get_platform() == 'Linux':
+ if module.get_bin_path('timedatectl') is not None:
+ return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
+ else:
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ else:
+ # Not supported yet
+ return super(Timezone, Timezone).__new__(Timezone)
+
+ def __init__(self, module):
+ """Initialize of the class.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ super(Timezone, self).__init__()
+ self.msg = []
+ # `self.value` holds the value of each param in each phase.
+ # Initially it only contains the "planned" phase; the
+ # `self.check()` function fills in the other phases.
+ self.value = dict()
+ for key in module.argument_spec:
+ value = module.params[key]
+ if value is not None:
+ self.value[key] = dict(planned=value)
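+ # e.g. with name=Asia/Tokyo given, self.value is now
+ # {'name': {'planned': 'Asia/Tokyo'}}; check() adds the other phases.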
+ self.module = module
+
+ def abort(self, msg):
+ """Abort the process with error message.
+
+ This is just a wrapper around module.fail_json().
+
+ Args:
+ msg: The error message.
+ """
+ error_msg = ['Error message:', msg]
+ if len(self.msg) > 0:
+ error_msg.append('Other message(s):')
+ error_msg.extend(self.msg)
+ self.module.fail_json(msg='\n'.join(error_msg))
+
+ def execute(self, *commands, **kwargs):
+ """Execute the shell command.
+
+ This is just a wrapper around module.run_command().
+
+ Args:
+ *commands: The command fragments to execute.
+ They will be joined with single spaces.
+ **kwargs: Only 'log' key is checked.
+ If kwargs['log'] is true, record the command to self.msg.
+
+ Returns:
+ stdout: Standard output of the command.
+ """
+ command = ' '.join(commands)
+ (rc, stdout, stderr) = self.module.run_command(command, check_rc=True)
+ if kwargs.get('log', False):
+ self.msg.append('executed `%s`' % command)
+ return stdout
+
+ def diff(self, phase1='before', phase2='after'):
+ """Calculate the difference between given 2 phases.
+
+ Args:
+ phase1, phase2: The names of the phases to compare.
+
+ Returns:
+ diff: The difference of value between phase1 and phase2.
+ This is in the format which can be used with the
+ `--diff` option of ansible-playbook.
+ """
+ diff = {phase1: {}, phase2: {}}
+ for key, value in iteritems(self.value):
+ diff[phase1][key] = value[phase1]
+ diff[phase2][key] = value[phase2]
+ return diff
+
+ def check(self, phase):
+ """Check the state in given phase and set it to `self.value`.
+
+ Args:
+ phase: The name of the phase to check.
+
+ Returns:
+ NO RETURN VALUE
+ """
+ if phase == 'planned':
+ return
+ for key, value in iteritems(self.value):
+ value[phase] = self.get(key, phase)
+
+ def change(self):
+ """Make the changes effect based on `self.value`."""
+ for key, value in iteritems(self.value):
+ if value['before'] != value['planned']:
+ self.set(key, value['planned'])
+
+ # ===========================================
+ # Platform specific methods (must be replaced by subclass).
+
+ def get(self, key, phase):
+ """Get the value for the key at the given phase.
+
+ Called from self.check().
+
+ Args:
+ key: The key to get the value
+ phase: The phase to get the value
+
+ Return:
+ value: The value for the key at the given phase.
+ """
+ self.abort('get(key, phase) is not implemented on target platform')
+
+ def set(self, key, value):
+ """Set the value for the key (of course, for the phase 'after').
+
+ Called from self.change().
+
+ Args:
+ key: Key to set the value
+ value: Value to set
+ """
+ self.abort('set(key, value) is not implemented on target platform')
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ tzfile = '/usr/share/zoneinfo/%s' % tz
+ if not os.path.isfile(tzfile):
+ self.abort('given timezone "%s" is not available' % tz)
+
+
+class SystemdTimezone(Timezone):
+ """This is a Timezone manipulation class systemd-powered Linux.
+
+ It uses the `timedatectl` command to check/set all arguments.
+ """
+
+ regexps = dict(
+ hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE),
+ name =re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ subcmds = dict(
+ hwclock='set-local-rtc',
+ name ='set-timezone'
+ )
+
+ def __init__(self, module):
+ super(SystemdTimezone, self).__init__(module)
+ self.timedatectl = module.get_bin_path('timedatectl', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_status(self, phase):
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.timedatectl, 'status')
+ return self.status[phase]
+
+ def get(self, key, phase):
+ status = self._get_status(phase)
+ value = self.regexps[key].search(status).group(1)
+ if key == 'hwclock':
+ # For key='hwclock'; convert yes/no -> local/UTC
+ if self.module.boolean(value):
+ value = 'local'
+ else:
+ value = 'UTC'
+ return value
+
+ def set(self, key, value):
+ # For key='hwclock'; convert UTC/local -> yes/no
+ if key == 'hwclock':
+ if value == 'local':
+ value = 'yes'
+ else:
+ value = 'no'
+ self.execute(self.timedatectl, self.subcmds[key], value, log=True)
+
+
+class NosystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for non systemd-powered Linux.
+
+ For the timezone setting, it edits one of the following files and reflects the change:
+ - /etc/sysconfig/clock ... RHEL/CentOS
+ - /etc/timezone ... Debian/Ubuntu
+ For hwclock setting, it executes `hwclock --systohc` command with the
+ '--utc' or '--localtime' option.
+ """
+
+ conf_files = dict(
+ name =None, # To be set in __init__
+ hwclock=None, # To be set in __init__
+ adjtime='/etc/adjtime'
+ )
+
+ regexps = dict(
+ name =None, # To be set in __init__
+ hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE),
+ adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE)
+ )
+
+ def __init__(self, module):
+ super(NosystemdTimezone, self).__init__(module)
+ # Validate given timezone
+ tzfile = None
+ if 'name' in self.value:
+ self._verify_timezone()
+ tzfile = '/usr/share/zoneinfo/%s' % self.value['name']['planned']
+ # Default to copying the zoneinfo file over /etc/localtime; this command
+ # is only ever run when a timezone name was given, so tzfile is then set.
+ self.update_timezone = self.module.get_bin_path('cp', required=True)
+ self.update_timezone += ' %s /etc/localtime' % tzfile
+ self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
+ # Distribution-specific configurations
+ if self.module.get_bin_path('dpkg-reconfigure') is not None:
+ # Debian/Ubuntu
+ self.update_timezone = self.module.get_bin_path('dpkg-reconfigure', required=True)
+ self.update_timezone += ' --frontend noninteractive tzdata'
+ self.conf_files['name'] = '/etc/timezone'
+ self.conf_files['hwclock'] = '/etc/default/rcS'
+ self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE)
+ self.tzline_format = '%s\n'
+ else:
+ # RHEL/CentOS
+ if self.module.get_bin_path('tzdata-update') is not None:
+ self.update_timezone = self.module.get_bin_path('tzdata-update', required=True)
+ # else:
+ # self.update_timezone = 'cp ...' <- configured above
+ self.conf_files['name'] = '/etc/sysconfig/clock'
+ self.conf_files['hwclock'] = '/etc/sysconfig/clock'
+ self.regexps['name'] = re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
+ self.tzline_format = 'ZONE="%s"\n'
+ self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
+
+ def _edit_file(self, filename, regexp, value):
+ """Replace the first matched line with given `value`.
+
+ If `regexp` matches more than one line, all but the first matching line are deleted.
+
+ Args:
+ filename: The name of the file to edit.
+ regexp: The regular expression to search with.
+ value: The line which will be inserted.
+ """
+ # Read the file
+ try:
+ file = open(filename, 'r')
+ except IOError:
+ self.abort('cannot read "%s"' % filename)
+ else:
+ lines = file.readlines()
+ file.close()
+ # Find all matching lines
+ matched_indices = []
+ for i, line in enumerate(lines):
+ if regexp.search(line):
+ matched_indices.append(i)
+ if len(matched_indices) > 0:
+ insert_line = matched_indices[0]
+ else:
+ insert_line = 0
+ # Remove all matched lines
+ for i in matched_indices[::-1]:
+ del lines[i]
+ # ...and insert the value
+ lines.insert(insert_line, value)
+ # Write the changes
+ try:
+ file = open(filename, 'w')
+ except IOError:
+ self.abort('cannot write to "%s"' % filename)
+ else:
+ file.writelines(lines)
+ file.close()
+ self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))
+
+ def get(self, key, phase):
+ if key == 'hwclock' and os.path.isfile('/etc/adjtime'):
+ # If /etc/adjtime exists, use that file.
+ key = 'adjtime'
+
+ filename = self.conf_files[key]
+
+ try:
+ file = open(filename, mode='r')
+ except IOError:
+ self.abort('cannot read configuration file "%s" for %s' % (filename, key))
+ else:
+ status = file.read()
+ file.close()
+ try:
+ value = self.regexps[key].search(status).group(1)
+ except AttributeError:
+ self.abort('cannot find a valid value in configuration file "%s" for %s' % (filename, key))
+ else:
+ if key == 'hwclock':
+ # For key='hwclock'; convert yes/no -> UTC/local
+ if self.module.boolean(value):
+ value = 'UTC'
+ else:
+ value = 'local'
+ elif key == 'adjtime':
+ # For key='adjtime'; convert LOCAL -> local
+ if value != 'UTC':
+ value = value.lower()
+ return value
+
+ def set_timezone(self, value):
+ self._edit_file(filename=self.conf_files['name'],
+ regexp=self.regexps['name'],
+ value=self.tzline_format % value)
+ self.execute(self.update_timezone)
+
+ def set_hwclock(self, value):
+ if value == 'local':
+ option = '--localtime'
+ else:
+ option = '--utc'
+ self.execute(self.update_hwclock, '--systohc', option, log=True)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.set_timezone(value)
+ elif key == 'hwclock':
+ self.set_hwclock(value)
+ else:
+ self.abort('unknown parameter "%s"' % key)
+
+
+def main():
+ # Construct 'module' and 'tz'
+ arg_spec = dict(
+ hwclock=dict(choices=['UTC', 'local'], aliases=['rtc']),
+ name =dict(),
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ required_one_of=[arg_spec.keys()],
+ supports_check_mode=True
+ )
+ tz = Timezone(module)
+
+ # Check the current state
+ tz.check(phase='before')
+ if module.check_mode:
+ diff = tz.diff('before', 'planned')
+ # In check mode, 'planned' state is treated as 'after' state
+ diff['after'] = diff.pop('planned')
+ else:
+ # Make change
+ tz.change()
+ # Check the current state
+ tz.check(phase='after')
+ # Examine if the current state matches planned state
+ (after, planned) = tz.diff('after', 'planned').values()
+ if after != planned:
+ tz.abort('still not in the desired state, though changes were made')
+ diff = tz.diff('before', 'after')
+
+ changed = (diff['before'] != diff['after'])
+ if len(tz.msg) > 0:
+ module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg))
+ else:
+ module.exit_json(changed=changed, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/system/ufw.py b/system/ufw.py
index cd148edf2ef..6d381785bc5 100644
--- a/system/ufw.py
+++ b/system/ufw.py
@@ -21,6 +21,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ufw
@@ -125,55 +129,103 @@
EXAMPLES = '''
# Allow everything and enable UFW
-ufw: state=enabled policy=allow
+- ufw:
+ state: enabled
+ policy: allow
# Set logging
-ufw: logging=on
+- ufw:
+ logging: on
# Sometimes it is desirable to let the sender know when traffic is
# being denied, rather than simply ignoring it. In these cases, use
# reject instead of deny. In addition, log rejected connections:
-ufw: rule=reject port=auth log=yes
+- ufw:
+ rule: reject
+ port: auth
+ log: yes
# ufw supports connection rate limiting, which is useful for protecting
# against brute-force login attacks. ufw will deny connections if an IP
# address has attempted to initiate 6 or more connections in the last
# 30 seconds. See http://www.debian-administration.org/articles/187
# for details. Typical usage is:
-ufw: rule=limit port=ssh proto=tcp
-
-# Allow OpenSSH
-ufw: rule=allow name=OpenSSH
+- ufw:
+ rule: limit
+ port: ssh
+ proto: tcp
+
+# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
+# a rule=allow task can leave those ports exposed. Either use delete=yes
+# or a separate state=reset task)
+- ufw:
+ rule: allow
+ name: OpenSSH
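+
+# Illustrative sketch of the separate state=reset task mentioned above; it
+# wipes existing rules so the desired set can be re-applied from scratch
+- ufw:
+ state: reset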
# Delete OpenSSH rule
-ufw: rule=allow name=OpenSSH delete=yes
+- ufw:
+ rule: allow
+ name: OpenSSH
+ delete: yes
# Deny all access to port 53:
-ufw: rule=deny port=53
+- ufw:
+ rule: deny
+ port: 53
+
+# Allow port range 60000-61000
+- ufw:
+ rule: allow
+ port: '60000:61000'
# Allow all access to tcp port 80:
-ufw: rule=allow port=80 proto=tcp
+- ufw:
+ rule: allow
+ port: 80
+ proto: tcp
# Allow all access from RFC1918 networks to this host:
-ufw: rule=allow src={{ item }}
-with_items:
-- 10.0.0.0/8
-- 172.16.0.0/12
-- 192.168.0.0/16
+- ufw:
+ rule: allow
+ src: '{{ item }}'
+ with_items:
+ - 10.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
# Deny access to udp port 514 from host 1.2.3.4:
-ufw: rule=deny proto=udp src=1.2.3.4 port=514
+- ufw:
+ rule: deny
+ proto: udp
+ src: 1.2.3.4
+ port: 514
# Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
-ufw: rule=allow interface=eth0 direction=in proto=udp src=1.2.3.5 from_port=5469 dest=1.2.3.4 to_port=5469
+- ufw:
+ rule: allow
+ interface: eth0
+ direction: in
+ proto: udp
+ src: 1.2.3.5
+ from_port: 5469
+ dest: 1.2.3.4
+ to_port: 5469
# Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host.
# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
-ufw: rule=deny proto=tcp src=2001:db8::/32 port=25
+- ufw:
+ rule: deny
+ proto: tcp
+ src: '2001:db8::/32'
+ port: 25
# Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24.
# Can be used to further restrict a global FORWARD policy set to allow
-ufw: rule=deny route=yes src=1.2.3.0/24 dest=4.5.6.0/24
+- ufw:
+ rule: deny
+ route: yes
+ src: 1.2.3.0/24
+ dest: 4.5.6.0/24
'''
from operator import itemgetter
@@ -223,7 +275,7 @@ def execute(cmd):
if len(commands) < 1:
module.fail_json(msg="Not any of the command arguments %s given" % commands)
- if('interface' in params and 'direction' not in params):
+ if(params['interface'] is not None and params['direction'] is None):
module.fail_json(msg="Direction must be specified when creating a rule on an interface")
# Ensure ufw is available
@@ -258,10 +310,11 @@ def execute(cmd):
cmd.append([module.boolean(params['route']), 'route'])
cmd.append([params['insert'], "insert %s" % params['insert']])
cmd.append([value])
+ cmd.append([params['direction'], "%s" % params['direction']])
+ cmd.append([params['interface'], "on %s" % params['interface']])
cmd.append([module.boolean(params['log']), 'log'])
- for (key, template) in [('direction', "%s" ), ('interface', "on %s" ),
- ('from_ip', "from %s" ), ('from_port', "port %s" ),
+ for (key, template) in [('from_ip', "from %s" ), ('from_port', "port %s" ),
('to_ip', "to %s" ), ('to_port', "port %s" ),
('proto', "proto %s"), ('app', "app '%s'")]:
@@ -280,4 +333,5 @@ def execute(cmd):
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/zfs.py b/system/zfs.py
index 51b9db63692..d95971455ed 100644
--- a/system/zfs.py
+++ b/system/zfs.py
@@ -19,12 +19,16 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: zfs
short_description: Manage zfs
description:
- - Manages ZFS file systems on Solaris and FreeBSD. Can manage file systems, volumes and snapshots. See zfs(1M) for more information about the properties.
+ - Manages ZFS file systems, volumes, clones and snapshots.
version_added: "1.1"
options:
name:
@@ -33,224 +37,104 @@
required: true
state:
description:
- - Whether to create (C(present)), or remove (C(absent)) a file system, snapshot or volume.
+ - Whether to create (C(present)), or remove (C(absent)) a
+ file system, snapshot or volume. All parents/children
+ will be created/destroyed as needed to reach the desired state.
+ choices: ['present', 'absent']
required: true
- choices: [present, absent]
- aclinherit:
- description:
- - The aclinherit property.
- required: False
- choices: [discard,noallow,restricted,passthrough,passthrough-x]
- aclmode:
- description:
- - The aclmode property.
- required: False
- choices: [discard,groupmask,passthrough]
- atime:
- description:
- - The atime property.
- required: False
- choices: ['on','off']
- canmount:
- description:
- - The canmount property.
- required: False
- choices: ['on','off','noauto']
- casesensitivity:
- description:
- - The casesensitivity property.
- required: False
- choices: [sensitive,insensitive,mixed]
- checksum:
- description:
- - The checksum property.
- required: False
- choices: ['on','off',fletcher2,fletcher4,sha256]
- compression:
- description:
- - The compression property.
- required: False
- choices: ['on','off',lzjb,gzip,gzip-1,gzip-2,gzip-3,gzip-4,gzip-5,gzip-6,gzip-7,gzip-8,gzip-9,lz4,zle]
- copies:
- description:
- - The copies property.
- required: False
- choices: [1,2,3]
- dedup:
- description:
- - The dedup property.
- required: False
- choices: ['on','off']
- devices:
- description:
- - The devices property.
- required: False
- choices: ['on','off']
- exec:
- description:
- - The exec property.
- required: False
- choices: ['on','off']
- jailed:
- description:
- - The jailed property.
- required: False
- choices: ['on','off']
- logbias:
- description:
- - The logbias property.
- required: False
- choices: [latency,throughput]
- mountpoint:
- description:
- - The mountpoint property.
- required: False
- nbmand:
- description:
- - The nbmand property.
- required: False
- choices: ['on','off']
- normalization:
- description:
- - The normalization property.
- required: False
- choices: [none,formC,formD,formKC,formKD]
origin:
description:
- - Name of the snapshot to clone
- required: False
- version_added: "2.0"
- primarycache:
- description:
- - The primarycache property.
- required: False
- choices: [all,none,metadata]
- quota:
- description:
- - The quota property.
- required: False
- readonly:
- description:
- - The readonly property.
- required: False
- choices: ['on','off']
- recordsize:
- description:
- - The recordsize property.
- required: False
- refquota:
- description:
- - The refquota property.
- required: False
- refreservation:
- description:
- - The refreservation property.
- required: False
- reservation:
- description:
- - The reservation property.
- required: False
- secondarycache:
- description:
- - The secondarycache property.
- required: False
- choices: [all,none,metadata]
- setuid:
- description:
- - The setuid property.
- required: False
- choices: ['on','off']
- shareiscsi:
- description:
- - The shareiscsi property.
- required: False
- choices: ['on','off']
- sharenfs:
+ - Snapshot from which to create a clone.
+ default: null
+ required: false
+ key_value:
description:
- - The sharenfs property.
- required: False
- sharesmb:
- description:
- - The sharesmb property.
- required: False
- snapdir:
- description:
- - The snapdir property.
- required: False
- choices: [hidden,visible]
- sync:
- description:
- - The sync property.
- required: False
- choices: ['standard','always','disabled']
- utf8only:
- description:
- - The utf8only property.
- required: False
- choices: ['on','off']
- volsize:
- description:
- - The volsize property.
- required: False
- volblocksize:
- description:
- - The volblocksize property.
- required: False
- vscan:
- description:
- - The vscan property.
- required: False
- choices: ['on','off']
- xattr:
- description:
- - The xattr property.
- required: False
- choices: ['on','off']
- zoned:
- description:
- - The zoned property.
- required: False
- choices: ['on','off']
+ - The C(zfs) module takes key=value pairs for zfs properties to be set. See the zfs(8) man page for more information.
+ default: null
+ required: false
+
author: "Johan Wiren (@johanwiren)"
'''
EXAMPLES = '''
-# Create a new file system called myfs in pool rpool
-- zfs: name=rpool/myfs state=present
-
-# Create a new volume called myvol in pool rpool.
-- zfs: name=rpool/myvol state=present volsize=10M
+# Create a new file system called myfs in pool rpool with the setuid property turned off
+- zfs:
+ name: rpool/myfs
+ state: present
+ setuid: off
+
+# Create a new volume called myvol in pool rpool.
+- zfs:
+ name: rpool/myvol
+ state: present
+ volsize: 10M
# Create a snapshot of rpool/myfs file system.
-- zfs: name=rpool/myfs@mysnapshot state=present
+- zfs:
+ name: rpool/myfs@mysnapshot
+ state: present
# Create a new file system called myfs2 with snapdir enabled
-- zfs: name=rpool/myfs2 state=present snapdir=enabled
+- zfs:
+ name: rpool/myfs2
+ state: present
+ snapdir: enabled
# Create a new file system by cloning a snapshot
-- zfs: name=rpool/cloned_fs state=present origin=rpool/myfs@mysnapshot
+- zfs:
+ name: rpool/cloned_fs
+ state: present
+ origin: rpool/myfs@mysnapshot
# Destroy a filesystem
-- zfs: name=rpool/myfs state=absent
+- zfs:
+ name: rpool/myfs
+ state: absent
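+
+# Illustrative sketch of a freestyle property: any key=value pair outside the
+# argument spec is passed through as a zfs property (see zfs(8)); sharenfs is
+# quoted so YAML keeps it a string
+- zfs:
+ name: rpool/myfs
+ state: present
+ sharenfs: 'on'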
'''
import os
+
class Zfs(object):
+
def __init__(self, module, name, properties):
self.module = module
self.name = name
self.properties = properties
self.changed = False
-
- self.immutable_properties = [ 'casesensitivity', 'normalization', 'utf8only' ]
+ self.zfs_cmd = module.get_bin_path('zfs', True)
+ self.zpool_cmd = module.get_bin_path('zpool', True)
+ self.pool = name.split('/')[0]
+ self.is_solaris = os.uname()[0] == 'SunOS'
+ self.is_openzfs = self.check_openzfs()
+ self.enhanced_sharing = self.check_enhanced_sharing()
+
+ def check_openzfs(self):
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
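+ # OpenZFS pools report '-' (feature flags) or the sentinel version 5000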
+ if version == '-':
+ return True
+ if int(version) == 5000:
+ return True
+ return False
+
+ def check_enhanced_sharing(self):
+ if self.is_solaris and not self.is_openzfs:
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
+ if int(version) >= 34:
+ return True
+ return False
def exists(self):
- cmd = [self.module.get_bin_path('zfs', True)]
- cmd.append('list')
- cmd.append('-t all')
- cmd.append(self.name)
+ cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
(rc, out, err) = self.module.run_command(' '.join(cmd))
if rc == 0:
return True
@@ -265,6 +149,8 @@ def create(self):
volsize = properties.pop('volsize', None)
volblocksize = properties.pop('volblocksize', None)
origin = properties.pop('origin', None)
+ cmd = [self.zfs_cmd]
+
if "@" in self.name:
action = 'snapshot'
elif origin:
@@ -272,135 +158,83 @@ def create(self):
else:
action = 'create'
- cmd = [self.module.get_bin_path('zfs', True)]
cmd.append(action)
- if createparent:
- cmd.append('-p')
+ if action in ['create', 'clone']:
+ cmd += ['-p']
+ if volsize:
+ cmd += ['-V', volsize]
if volblocksize:
- cmd.append('-b %s' % volblocksize)
+ cmd += ['-b', volblocksize]
if properties:
for prop, value in properties.iteritems():
- cmd.append('-o %s="%s"' % (prop, value))
- if volsize:
- cmd.append('-V')
- cmd.append(volsize)
+ cmd += ['-o', '%s="%s"' % (prop, value)]
if origin:
cmd.append(origin)
cmd.append(self.name)
- (rc, err, out) = self.module.run_command(' '.join(cmd))
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
if rc == 0:
self.changed = True
else:
- self.module.fail_json(msg=out)
+ self.module.fail_json(msg=err)
def destroy(self):
if self.module.check_mode:
self.changed = True
return
- cmd = [self.module.get_bin_path('zfs', True)]
- cmd.append('destroy')
- cmd.append(self.name)
- (rc, err, out) = self.module.run_command(' '.join(cmd))
+ cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
if rc == 0:
self.changed = True
else:
- self.module.fail_json(msg=out)
+ self.module.fail_json(msg=err)
def set_property(self, prop, value):
if self.module.check_mode:
self.changed = True
return
- cmd = self.module.get_bin_path('zfs', True)
- args = [cmd, 'set', prop + '=' + value, self.name]
- (rc, err, out) = self.module.run_command(args)
+ cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
+ (rc, out, err) = self.module.run_command(cmd)
if rc == 0:
self.changed = True
else:
- self.module.fail_json(msg=out)
+ self.module.fail_json(msg=err)
def set_properties_if_changed(self):
current_properties = self.get_current_properties()
for prop, value in self.properties.iteritems():
- if current_properties[prop] != value:
- if prop in self.immutable_properties:
- self.module.fail_json(msg='Cannot change property %s after creation.' % prop)
- else:
- self.set_property(prop, value)
+ if current_properties.get(prop, None) != value:
+ self.set_property(prop, value)
def get_current_properties(self):
- def get_properties_by_name(propname):
- cmd = [self.module.get_bin_path('zfs', True)]
- cmd += ['get', '-H', propname, self.name]
- rc, out, err = self.module.run_command(cmd)
- return [l.split('\t')[1:3] for l in out.splitlines()]
- properties = dict(get_properties_by_name('all'))
- if 'share.*' in properties:
- # Some ZFS pools list the sharenfs and sharesmb properties
- # hierarchically as share.nfs and share.smb respectively.
- del properties['share.*']
- for p, v in get_properties_by_name('share.all'):
- alias = p.replace('.', '') # share.nfs -> sharenfs (etc)
- properties[alias] = v
+ cmd = [self.zfs_cmd, 'get', '-H']
+ if self.enhanced_sharing:
+ cmd += ['-e']
+ cmd += ['all', self.name]
+ rc, out, err = self.module.run_command(" ".join(cmd))
+ properties = dict()
+ for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]:
+ if source == 'local':
+ properties[prop] = value
+ # Add alias for enhanced sharing properties
+ if self.enhanced_sharing:
+ properties['sharenfs'] = properties.get('share.nfs', None)
+ properties['sharesmb'] = properties.get('share.smb', None)
return properties
- def run_command(self, cmd):
- progname = cmd[0]
- cmd[0] = module.get_bin_path(progname, True)
- return module.run_command(cmd)
def main():
- # FIXME: should use dict() constructor like other modules, required=False is default
module = AnsibleModule(
- argument_spec = {
- 'name': {'required': True},
- 'state': {'required': True, 'choices':['present', 'absent']},
- 'aclinherit': {'required': False, 'choices':['discard', 'noallow', 'restricted', 'passthrough', 'passthrough-x']},
- 'aclmode': {'required': False, 'choices':['discard', 'groupmask', 'passthrough']},
- 'atime': {'required': False, 'choices':['on', 'off']},
- 'canmount': {'required': False, 'choices':['on', 'off', 'noauto']},
- 'casesensitivity': {'required': False, 'choices':['sensitive', 'insensitive', 'mixed']},
- 'checksum': {'required': False, 'choices':['on', 'off', 'fletcher2', 'fletcher4', 'sha256']},
- 'compression': {'required': False, 'choices':['on', 'off', 'lzjb', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lz4', 'zle']},
- 'copies': {'required': False, 'choices':['1', '2', '3']},
- 'createparent': {'required': False, 'choices':['on', 'off']},
- 'dedup': {'required': False, 'choices':['on', 'off']},
- 'devices': {'required': False, 'choices':['on', 'off']},
- 'exec': {'required': False, 'choices':['on', 'off']},
- # Not supported
- #'groupquota': {'required': False},
- 'jailed': {'required': False, 'choices':['on', 'off']},
- 'logbias': {'required': False, 'choices':['latency', 'throughput']},
- 'mountpoint': {'required': False},
- 'nbmand': {'required': False, 'choices':['on', 'off']},
- 'normalization': {'required': False, 'choices':['none', 'formC', 'formD', 'formKC', 'formKD']},
- 'origin': {'required': False},
- 'primarycache': {'required': False, 'choices':['all', 'none', 'metadata']},
- 'quota': {'required': False},
- 'readonly': {'required': False, 'choices':['on', 'off']},
- 'recordsize': {'required': False},
- 'refquota': {'required': False},
- 'refreservation': {'required': False},
- 'reservation': {'required': False},
- 'secondarycache': {'required': False, 'choices':['all', 'none', 'metadata']},
- 'setuid': {'required': False, 'choices':['on', 'off']},
- 'shareiscsi': {'required': False, 'choices':['on', 'off']},
- 'sharenfs': {'required': False},
- 'sharesmb': {'required': False},
- 'snapdir': {'required': False, 'choices':['hidden', 'visible']},
- 'sync': {'required': False, 'choices':['standard', 'always', 'disabled']},
- # Not supported
- #'userquota': {'required': False},
- 'utf8only': {'required': False, 'choices':['on', 'off']},
- 'volsize': {'required': False},
- 'volblocksize': {'required': False},
- 'vscan': {'required': False, 'choices':['on', 'off']},
- 'xattr': {'required': False, 'choices':['on', 'off']},
- 'zoned': {'required': False, 'choices':['on', 'off']},
- },
- supports_check_mode=True
+ argument_spec = dict(
+ name = dict(type='str', required=True),
+ state = dict(type='str', required=True, choices=['present', 'absent']),
+ # No longer used. Kept here to not interfere with zfs properties
+ createparent = dict(type='bool', required=False)
+ ),
+ supports_check_mode=True,
+ check_invalid_arguments=False
)
state = module.params.pop('state')
@@ -409,10 +243,16 @@ def main():
# Get all valid zfs-properties
properties = dict()
for prop, value in module.params.iteritems():
- if prop in ['CHECKMODE']:
- continue
- if value:
- properties[prop] = value
+ # All freestyle params are zfs properties
+ if prop not in module.argument_spec:
+ # Reverse the boolification of freestyle zfs properties
+ if isinstance(value, bool):
+ if value is True:
+ properties[prop] = 'on'
+ else:
+ properties[prop] = 'off'
+ else:
+ properties[prop] = value
result = {}
result['name'] = name
@@ -436,4 +276,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/test-docs.sh b/test-docs.sh
deleted file mode 100755
index 76297fbada6..00000000000
--- a/test-docs.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh
-set -x
-
-CHECKOUT_DIR=".ansible-checkout"
-MOD_REPO="$1"
-
-# Hidden file to avoid the module_formatter recursing into the checkout
-git clone https://github.com/ansible/ansible "$CHECKOUT_DIR"
-cd "$CHECKOUT_DIR"
-git submodule update --init
-rm -rf "lib/ansible/modules/$MOD_REPO"
-ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO"
-
-pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx
-
-. ./hacking/env-setup
-PAGER=/bin/cat bin/ansible-doc -l
-if [ $? -ne 0 ] ; then
- exit $?
-fi
-make -C docsite
diff --git a/test/utils/shippable/ci.sh b/test/utils/shippable/ci.sh
new file mode 100755
index 00000000000..5c0f847e661
--- /dev/null
+++ b/test/utils/shippable/ci.sh
@@ -0,0 +1,7 @@
+#!/bin/bash -eux
+
+set -o pipefail
+
+source_root=$(python -c "from os import path; print(path.abspath(path.join(path.dirname('$0'), '../../..')))")
+
+"${source_root}/test/utils/shippable/${TEST}.sh" 2>&1 | gawk '{ print strftime("%Y-%m-%d %H:%M:%S"), $0; fflush(); }'
diff --git a/test/utils/shippable/docs-requirements.txt b/test/utils/shippable/docs-requirements.txt
new file mode 100644
index 00000000000..4e859bb8c71
--- /dev/null
+++ b/test/utils/shippable/docs-requirements.txt
@@ -0,0 +1,2 @@
+jinja2
+pyyaml
diff --git a/test/utils/shippable/docs.sh b/test/utils/shippable/docs.sh
new file mode 100755
index 00000000000..2858f87c997
--- /dev/null
+++ b/test/utils/shippable/docs.sh
@@ -0,0 +1,62 @@
+#!/bin/bash -eux
+
+set -o pipefail
+
+ansible_repo_url="https://github.com/ansible/ansible.git"
+
+build_dir="${SHIPPABLE_BUILD_DIR}"
+repo="${REPO_NAME}"
+
+case "${repo}" in
+ "ansible-modules-core")
+ this_module_group="core"
+ other_module_group="extras"
+ ;;
+ "ansible-modules-extras")
+ this_module_group="extras"
+ other_module_group="core"
+ ;;
+ *)
+ echo "Unsupported repo name: ${repo}"
+ exit 1
+ ;;
+esac
+
+modules_tmp_dir="${build_dir}.tmp"
+this_modules_dir="${build_dir}/lib/ansible/modules/${this_module_group}"
+other_modules_dir="${build_dir}/lib/ansible/modules/${other_module_group}"
+
+cd /
+mv "${build_dir}" "${modules_tmp_dir}"
+git clone "${ansible_repo_url}" "${build_dir}"
+cd "${build_dir}"
+rmdir "${this_modules_dir}"
+mv "${modules_tmp_dir}" "${this_modules_dir}"
+mv "${this_modules_dir}/shippable" "${build_dir}"
+git submodule init "${other_modules_dir}"
+git submodule sync "${other_modules_dir}"
+git submodule update "${other_modules_dir}"
+
+pip install -r lib/ansible/modules/${this_module_group}/test/utils/shippable/docs-requirements.txt --upgrade
+pip list
+
+source hacking/env-setup
+
+docs_status=0
+
+PAGER=/bin/cat \
+ ANSIBLE_DEPRECATION_WARNINGS=false \
+ bin/ansible-doc -l \
+ 2>/tmp/ansible-doc.err || docs_status=$?
+
+if [ -s /tmp/ansible-doc.err ]; then
+ # report warnings as errors
+ echo "Output from 'ansible-doc -l' on stderr is considered an error:"
+ cat /tmp/ansible-doc.err
+ exit 1
+fi
+
+if [ "${docs_status}" -ne 0 ]; then
+ echo "Running 'ansible-doc -l' failed with no output on stderr and exit code: ${docs_status}"
+ exit 1
+fi
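
docs.sh fails the build if `ansible-doc -l` writes anything at all to stderr, even when the command exits zero, so documentation warnings become hard errors. The same pattern as a hedged Python sketch, assuming ansible-doc is on PATH:

    # Sketch: treat any stderr output as an error, independent of exit code.
    import subprocess
    import sys

    proc = subprocess.Popen(['ansible-doc', '-l'], stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, universal_newlines=True)
    out, err = proc.communicate()
    if err:
        sys.stderr.write("stderr output is considered an error:\n" + err)
        sys.exit(1)
    sys.exit(proc.returncode)
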
diff --git a/test/utils/shippable/integration.sh b/test/utils/shippable/integration.sh
new file mode 100755
index 00000000000..cf10e681bfb
--- /dev/null
+++ b/test/utils/shippable/integration.sh
@@ -0,0 +1,55 @@
+#!/bin/bash -eux
+
+set -o pipefail
+
+ansible_repo_url="https://github.com/ansible/ansible.git"
+
+is_pr="${IS_PULL_REQUEST}"
+build_dir="${SHIPPABLE_BUILD_DIR}"
+repo="${REPO_NAME}"
+
+if [ "${is_pr}" != "true" ]; then
+ echo "Module integration tests are only supported on pull requests."
+ exit 0
+fi
+
+case "${repo}" in
+ "ansible-modules-core")
+ this_module_group="core"
+ other_module_group="extras"
+ ;;
+ "ansible-modules-extras")
+ this_module_group="extras"
+ other_module_group="core"
+ ;;
+ *)
+ echo "Unsupported repo name: ${repo}"
+ exit 1
+ ;;
+esac
+
+modules_tmp_dir="${build_dir}.tmp"
+this_modules_dir="${build_dir}/lib/ansible/modules/${this_module_group}"
+other_modules_dir="${build_dir}/lib/ansible/modules/${other_module_group}"
+
+cd /
+mv "${build_dir}" "${modules_tmp_dir}"
+git clone "${ansible_repo_url}" "${build_dir}"
+cd "${build_dir}"
+rmdir "${this_modules_dir}"
+mv "${modules_tmp_dir}" "${this_modules_dir}"
+mv "${this_modules_dir}/shippable" "${build_dir}"
+git submodule init "${other_modules_dir}"
+git submodule sync "${other_modules_dir}"
+git submodule update "${other_modules_dir}"
+
+pip install -r test/utils/shippable/modules/generate-tests-requirements.txt --upgrade
+pip list
+
+source hacking/env-setup
+
+test/utils/shippable/modules/generate-tests "${this_module_group}" --verbose --output /tmp/integration.sh >/dev/null
+
+if [ -f /tmp/integration.sh ]; then
+ /bin/bash -eux /tmp/integration.sh
+fi
diff --git a/test/utils/shippable/sanity-skip-python24.txt b/test/utils/shippable/sanity-skip-python24.txt
new file mode 100644
index 00000000000..cf392501c6f
--- /dev/null
+++ b/test/utils/shippable/sanity-skip-python24.txt
@@ -0,0 +1,16 @@
+/cloud/
+/clustering/consul.*.py
+/clustering/znode.py
+/database/influxdb/
+/database/mssql/
+/monitoring/zabbix.*.py
+/network/f5/
+/notification/pushbullet.py
+/packaging/language/maven_artifact.py
+/packaging/os/dnf.py
+/packaging/os/layman.py
+/remote_management/ipmi/
+/univention/
+/web_infrastructure/letsencrypt.py
+/infrastructure/foreman/
+/network/nmcli.py
diff --git a/test/utils/shippable/sanity.sh b/test/utils/shippable/sanity.sh
new file mode 100755
index 00000000000..8c1453022e7
--- /dev/null
+++ b/test/utils/shippable/sanity.sh
@@ -0,0 +1,41 @@
+#!/bin/bash -eux
+
+source_root=$(python -c "from os import path; print(path.abspath(path.join(path.dirname('$0'), '../../..')))")
+
+install_deps="${INSTALL_DEPS:-}"
+
+cd "${source_root}"
+
+# FIXME REPOMERGE: No need to checkout ansible
+build_dir=$(mktemp -d)
+trap 'rm -rf "${build_dir}"' EXIT
+
+git clone "https://github.com/ansible/ansible.git" "${build_dir}" --recursive
+source "${build_dir}/hacking/env-setup"
+# REPOMERGE: END
+
+if [ "${install_deps}" != "" ]; then
+ add-apt-repository ppa:fkrull/deadsnakes
+ apt-add-repository 'deb http://archive.ubuntu.com/ubuntu trusty-backports universe'
+ apt-get update -qq
+
+ apt-get install -qq shellcheck python2.4
+
+ # Install dependencies for ansible and validate_modules
+ pip install -r "${build_dir}/test/utils/shippable/sanity-requirements.txt" --upgrade
+ pip list
+
+fi
+
+validate_modules="${build_dir}/test/sanity/validate-modules/validate-modules"
+
+python2.4 -m compileall -fq -x "($(printf %s "$(< "test/utils/shippable/sanity-skip-python24.txt")" | tr '\n' '|'))" .
+python2.6 -m compileall -fq .
+python2.7 -m compileall -fq .
+python3.5 -m compileall -fq .
+
+ANSIBLE_DEPRECATION_WARNINGS=false \
+ "${validate_modules}" --exclude '/utilities/|/shippable(/|$)' .
+
+shellcheck \
+ test/utils/shippable/*.sh
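
The python2.4 compileall call above builds its -x exclusion regexp by joining the entries of sanity-skip-python24.txt with `|` and wrapping the result in parentheses. The same construction shown in Python, using a few of the listed entries:

    # Sketch: building the compileall -x exclusion pattern from the skip list.
    import re

    skip = ['/cloud/', '/network/f5/', '/packaging/os/dnf.py']
    pattern = '(' + '|'.join(skip) + ')'
    print(pattern)  # (/cloud/|/network/f5/|/packaging/os/dnf.py)
    print(bool(re.search(pattern, './cloud/amazon/ec2.py')))  # True -> file is skipped
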
diff --git a/univention/__init__.py b/univention/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/univention/udm_dns_record.py b/univention/udm_dns_record.py
new file mode 100644
index 00000000000..92cea504948
--- /dev/null
+++ b/univention/udm_dns_record.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+ config,
+ uldap,
+)
+from univention.admin.handlers.dns import (
+ forward_zone,
+ reverse_zone,
+)
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: udm_dns_record
+version_added: "2.2"
+author: "Tobias Rueetschi (@2-B)"
+short_description: Manage DNS entries on a Univention Corporate Server
+description:
+    - "This module allows managing DNS records on a Univention Corporate Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the dns record is present or not.
+ name:
+ required: true
+ description:
+ - "Name of the record, this is also the DNS record. E.g. www for
+ www.example.com."
+ zone:
+ required: true
+ description:
+ - Corresponding DNS zone for this record, e.g. example.com.
+ type:
+ required: true
+ choices: [ host_record, alias, ptr_record, srv_record, txt_record ]
+ description:
+ - "Define the record type. C(host_record) is a A or AAAA record,
+ C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record)
+ is a SRV record and C(txt_record) is a TXT record."
+ data:
+ required: false
+ default: []
+ description:
+ - "Additional data for this record, e.g. ['a': '192.0.2.1'].
+ Required if C(state=present)."
+'''
+
+
+EXAMPLES = '''
+# Create a DNS record on a UCS
+- udm_dns_record:
+ name: www
+ zone: example.com
+ type: host_record
+ data:
+ - a: 192.0.2.1
+'''
+
+
+RETURN = '''# '''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ type = dict(required=True,
+ type='str'),
+ zone = dict(required=True,
+ type='str'),
+ name = dict(required=True,
+ type='str'),
+            data = dict(default={},
+                        type='dict'),
+ state = dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if = ([
+ ('state', 'present', ['data'])
+ ])
+ )
+ type = module.params['type']
+ zone = module.params['zone']
+ name = module.params['name']
+ data = module.params['data']
+ state = module.params['state']
+    changed = False
+    diff = []  # ensure defined for exit_json on the absent path
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={})(relativeDomainName={}))'.format(zone, name),
+ attr=['dNSZone']
+ ))
+
+ exists = bool(len(obj))
+ container = 'zoneName={},cn=dns,{}'.format(zone, base_dn())
+ dn = 'relativeDomainName={},{}'.format(name, container)
+
+ if state == 'present':
+ try:
+ if not exists:
+ so = forward_zone.lookup(
+ config(),
+ uldap(),
+ '(zone={})'.format(zone),
+ scope='domain',
+ ) or reverse_zone.lookup(
+ config(),
+ uldap(),
+ '(zone={})'.format(zone),
+ scope='domain',
+ )
+ obj = umc_module_for_add('dns/{}'.format(type), container, superordinate=so[0])
+ else:
+ obj = umc_module_for_edit('dns/{}'.format(type), dn)
+ obj['name'] = name
+ for k, v in data.items():
+ obj[k] = v
+ diff = obj.diff()
+            changed = diff != []
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ else:
+ obj.modify()
+ except BaseException as e:
+ module.fail_json(
+ msg='Creating/editing dns entry {} in {} failed: {}'.format(name, container, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except BaseException as e:
+ module.fail_json(
+ msg='Removing dns entry {} in {} failed: {}'.format(name, container, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
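
All of the udm_* modules in this series share one idempotency skeleton: an LDAP search decides whether the object exists, umc_module_for_add or umc_module_for_edit selects the UDM handler, obj.diff() drives the changed flag, and check mode skips the final create/modify. A toy version of that flow; FakeObj is a stand-in for the real UDM object and is not part of the module:

    # Toy stand-in for a UDM object so the skeleton below actually runs.
    class FakeObj(object):
        def __init__(self, existing):
            self.existing, self.props = dict(existing), {}
        def __setitem__(self, key, value):
            self.props[key] = value
        def diff(self):
            return [k for k, v in self.props.items() if self.existing.get(k) != v]
        def create(self):
            print('created')
        def modify(self):
            print('modified')

    def ensure_present(obj, desired, exists, check_mode):
        for key, value in desired.items():
            obj[key] = value
        changed = (not exists) or obj.diff() != []
        if changed and not check_mode:
            obj.create() if not exists else obj.modify()
        return changed

    print(ensure_present(FakeObj({'name': 'www'}),
                         {'name': 'www', 'a': ['192.0.2.1']},
                         exists=True, check_mode=True))  # True, nothing written
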
diff --git a/univention/udm_dns_zone.py b/univention/udm_dns_zone.py
new file mode 100644
index 00000000000..2d7bbd09070
--- /dev/null
+++ b/univention/udm_dns_zone.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: udm_dns_zone
+version_added: "2.2"
+author: "Tobias Rueetschi (@2-B)"
+short_description: Manage DNS zones on a Univention Corporate Server
+description:
+    - "This module allows managing DNS zones on a Univention Corporate Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the dns zone is present or not.
+ type:
+ required: true
+ choices: [ forward_zone, reverse_zone ]
+ description:
+ - Define if the zone is a forward or reverse DNS zone.
+ zone:
+ required: true
+ description:
+ - DNS zone name, e.g. C(example.com).
+ nameserver:
+ required: false
+ description:
+ - List of appropriate name servers. Required if C(state=present).
+ interfaces:
+ required: false
+ description:
+      - List of interface IP addresses on which the server should
+        serve this zone. Required if C(state=present).
+
+ refresh:
+ required: false
+ default: 3600
+ description:
+ - Interval before the zone should be refreshed.
+ retry:
+ required: false
+ default: 1800
+ description:
+ - Interval that should elapse before a failed refresh should be retried.
+ expire:
+ required: false
+ default: 604800
+ description:
+ - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative.
+ ttl:
+ required: false
+ default: 600
+ description:
+ - Minimum TTL field that should be exported with any RR from this zone.
+
+ contact:
+ required: false
+ default: ''
+ description:
+ - Contact person in the SOA record.
+ mx:
+ required: false
+ default: []
+ description:
+      - List of MX servers. (Each must be declared as an A or AAAA record.)
+'''
+
+
+EXAMPLES = '''
+# Create a DNS zone on a UCS
+- udm_dns_zone:
+ zone: example.com
+ type: forward_zone
+ nameserver:
+ - ucs.example.com
+ interfaces:
+ - 192.0.2.1
+'''
+
+
+RETURN = '''# '''
+
+
+def convert_time(time):
+ """Convert a time in seconds into the biggest unit"""
+ units = [
+ (24 * 60 * 60 , 'days'),
+ (60 * 60 , 'hours'),
+ (60 , 'minutes'),
+ (1 , 'seconds'),
+ ]
+
+ if time == 0:
+ return ('0', 'seconds')
+ for unit in units:
+ if time >= unit[0]:
+ return ('{}'.format(time // unit[0]), unit[1])
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ type = dict(required=True,
+ type='str'),
+ zone = dict(required=True,
+ aliases=['name'],
+ type='str'),
+ nameserver = dict(default=[],
+ type='list'),
+ interfaces = dict(default=[],
+ type='list'),
+ refresh = dict(default=3600,
+ type='int'),
+ retry = dict(default=1800,
+ type='int'),
+ expire = dict(default=604800,
+ type='int'),
+ ttl = dict(default=600,
+ type='int'),
+ contact = dict(default='',
+ type='str'),
+ mx = dict(default=[],
+ type='list'),
+ state = dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if = ([
+ ('state', 'present', ['nameserver', 'interfaces'])
+ ])
+ )
+ type = module.params['type']
+ zone = module.params['zone']
+ nameserver = module.params['nameserver']
+ interfaces = module.params['interfaces']
+ refresh = module.params['refresh']
+ retry = module.params['retry']
+ expire = module.params['expire']
+ ttl = module.params['ttl']
+ contact = module.params['contact']
+ mx = module.params['mx']
+ state = module.params['state']
+    changed = False
+    diff = []
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={}))'.format(zone),
+ attr=['dNSZone']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=dns,{}'.format(base_dn())
+ dn = 'zoneName={},{}'.format(zone, container)
+ if contact == '':
+ contact = 'root@{}.'.format(zone)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('dns/{}'.format(type), container)
+ else:
+ obj = umc_module_for_edit('dns/{}'.format(type), dn)
+ obj['zone'] = zone
+ obj['nameserver'] = nameserver
+ obj['a'] = interfaces
+ obj['refresh'] = convert_time(refresh)
+ obj['retry'] = convert_time(retry)
+ obj['expire'] = convert_time(expire)
+ obj['ttl'] = convert_time(ttl)
+ obj['contact'] = contact
+ obj['mx'] = mx
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception as e:
+ module.fail_json(
+ msg='Creating/editing dns zone {} failed: {}'.format(zone, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as e:
+ module.fail_json(
+ msg='Removing dns zone {} failed: {}'.format(zone, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ diff=diff,
+ zone=zone
+ )
+
+
+if __name__ == '__main__':
+ main()
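
convert_time above reduces the integer SOA intervals to the largest whole unit UDM accepts, returned as a (value, unit) tuple of strings. Assuming the function as defined in this hunk is in scope, the module defaults come out as:

    # Usage of convert_time with the module's default intervals:
    print(convert_time(3600))    # ('1', 'hours')     refresh
    print(convert_time(1800))    # ('30', 'minutes')  retry
    print(convert_time(604800))  # ('7', 'days')      expire
    print(convert_time(600))     # ('10', 'minutes')  ttl
    print(convert_time(0))       # ('0', 'seconds')
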
diff --git a/univention/udm_group.py b/univention/udm_group.py
new file mode 100644
index 00000000000..82ef43faef5
--- /dev/null
+++ b/univention/udm_group.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: udm_group
+version_added: "2.2"
+author: "Tobias Rueetschi (@2-B)"
+short_description: Manage POSIX groups on a Univention Corporate Server
+description:
+    - "This module allows managing user groups on a Univention Corporate Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the group is present or not.
+ name:
+ required: true
+ description:
+ - Name of the posix group.
+ description:
+ required: false
+ description:
+ - Group description.
+ position:
+ required: false
+ description:
+      - Define the whole LDAP position of the group, e.g.
+ C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
+ ou:
+ required: false
+ description:
+ - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
+ subpath:
+ required: false
+ description:
+ - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
+'''
+
+
+EXAMPLES = '''
+# Create a POSIX group
+- udm_group:
+ name: g123m-1A
+
+# Create a POSIX group with the exact DN
+# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
+- udm_group:
+ name: g123m-1A
+ subpath: 'cn=classes,cn=students,cn=groups'
+ ou: school
+# or
+- udm_group:
+ name: g123m-1A
+ position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
+'''
+
+
+RETURN = '''# '''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True,
+ type='str'),
+ description = dict(default=None,
+ type='str'),
+ position = dict(default='',
+ type='str'),
+ ou = dict(default='',
+ type='str'),
+ subpath = dict(default='cn=groups',
+ type='str'),
+ state = dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ description = module.params['description']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+    changed = False
+    diff = []
+
+ groups = list(ldap_search(
+ '(&(objectClass=posixGroup)(cn={}))'.format(name),
+ attr=['cn']
+ ))
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={},'.format(ou)
+ if subpath != '':
+ subpath = '{},'.format(subpath)
+ container = '{}{}{}'.format(subpath, ou, base_dn())
+ group_dn = 'cn={},{}'.format(name, container)
+
+ exists = bool(len(groups))
+
+ if state == 'present':
+ try:
+ if not exists:
+ grp = umc_module_for_add('groups/group', container)
+ else:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ grp['name'] = name
+ grp['description'] = description
+ diff = grp.diff()
+            changed = diff != []
+ if not module.check_mode:
+ if not exists:
+ grp.create()
+ else:
+ grp.modify()
+ except:
+ module.fail_json(
+ msg="Creating/editing group {} in {} failed".format(name, container)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ if not module.check_mode:
+ grp.remove()
+ changed = True
+ except:
+ module.fail_json(
+ msg="Removing group {} failed".format(name)
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
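
The container DN logic above prefers a fully spelled-out `position`; otherwise it concatenates `subpath`, `ou` and the domain base DN. Extracted into a standalone function, with the base DN hard-coded purely for illustration:

    # Sketch of udm_group's container DN assembly.
    def build_container(position, ou, subpath, base='dc=example,dc=com'):
        if position:
            return position
        ou = 'ou={},'.format(ou) if ou else ''
        subpath = '{},'.format(subpath) if subpath else ''
        return '{}{}{}'.format(subpath, ou, base)

    print(build_container('', 'school', 'cn=classes,cn=students,cn=groups'))
    # cn=classes,cn=students,cn=groups,ou=school,dc=example,dc=com
    print(build_container('cn=groups,dc=other,dc=com', '', 'cn=groups'))
    # cn=groups,dc=other,dc=com
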
diff --git a/univention/udm_share.py b/univention/udm_share.py
new file mode 100644
index 00000000000..7cb472c3141
--- /dev/null
+++ b/univention/udm_share.py
@@ -0,0 +1,622 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: udm_share
+version_added: "2.2"
+author: "Tobias Rueetschi (@2-B)"
+short_description: Manage Samba shares on a Univention Corporate Server
+description:
+    - "This module allows managing Samba shares on a Univention Corporate
+       Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the share is present or not.
+ name:
+ required: true
+ description:
+      - Name of the share.
+ host:
+ required: false
+ default: None
+ description:
+ - Host FQDN (server which provides the share), e.g. C({{
+ ansible_fqdn }}). Required if C(state=present).
+ path:
+ required: false
+ default: None
+ description:
+ - Directory on the providing server, e.g. C(/home). Required if C(state=present).
+ samba_name:
+ required: false
+ default: None
+ description:
+ - Windows name. Required if C(state=present).
+ aliases: [ sambaName ]
+ ou:
+ required: true
+ description:
+ - Organisational unit, inside the LDAP Base DN.
+ owner:
+ required: false
+ default: 0
+ description:
+ - Directory owner of the share's root directory.
+ group:
+ required: false
+ default: '0'
+ description:
+ - Directory owner group of the share's root directory.
+ directorymode:
+ required: false
+ default: '00755'
+ description:
+ - Permissions for the share's root directory.
+ root_squash:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Modify user ID for root user (root squashing).
+ subtree_checking:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Subtree checking.
+ sync:
+ required: false
+ default: 'sync'
+ description:
+ - NFS synchronisation.
+ writeable:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - NFS write access.
+ samba_block_size:
+ required: false
+ default: None
+ description:
+ - Blocking size.
+ aliases: [ sambaBlockSize ]
+ samba_blocking_locks:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Blocking locks.
+ aliases: [ sambaBlockingLocks ]
+ samba_browseable:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Show in Windows network environment.
+ aliases: [ sambaBrowseable ]
+ samba_create_mode:
+ required: false
+ default: '0744'
+ description:
+ - File mode.
+ aliases: [ sambaCreateMode ]
+ samba_csc_policy:
+ required: false
+ default: 'manual'
+ description:
+ - Client-side caching policy.
+ aliases: [ sambaCscPolicy ]
+ samba_custom_settings:
+ required: false
+ default: []
+ description:
+ - Option name in smb.conf and its value.
+ aliases: [ sambaCustomSettings ]
+ samba_directory_mode:
+ required: false
+ default: '0755'
+ description:
+ - Directory mode.
+ aliases: [ sambaDirectoryMode ]
+ samba_directory_security_mode:
+ required: false
+ default: '0777'
+ description:
+ - Directory security mode.
+ aliases: [ sambaDirectorySecurityMode ]
+ samba_dos_filemode:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Users with write access may modify permissions.
+ aliases: [ sambaDosFilemode ]
+ samba_fake_oplocks:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Fake oplocks.
+ aliases: [ sambaFakeOplocks ]
+ samba_force_create_mode:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Force file mode.
+ aliases: [ sambaForceCreateMode ]
+ samba_force_directory_mode:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Force directory mode.
+ aliases: [ sambaForceDirectoryMode ]
+ samba_force_directory_security_mode:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Force directory security mode.
+ aliases: [ sambaForceDirectorySecurityMode ]
+ samba_force_group:
+ required: false
+ default: None
+ description:
+ - Force group.
+ aliases: [ sambaForceGroup ]
+ samba_force_security_mode:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Force security mode.
+ aliases: [ sambaForceSecurityMode ]
+ samba_force_user:
+ required: false
+ default: None
+ description:
+ - Force user.
+ aliases: [ sambaForceUser ]
+ samba_hide_files:
+ required: false
+ default: None
+ description:
+ - Hide files.
+ aliases: [ sambaHideFiles ]
+ samba_hide_unreadable:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Hide unreadable files/directories.
+ aliases: [ sambaHideUnreadable ]
+ samba_hosts_allow:
+ required: false
+ default: []
+ description:
+ - Allowed host/network.
+ aliases: [ sambaHostsAllow ]
+ samba_hosts_deny:
+ required: false
+ default: []
+ description:
+ - Denied host/network.
+ aliases: [ sambaHostsDeny ]
+ samba_inherit_acls:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Inherit ACLs.
+ aliases: [ sambaInheritAcls ]
+ samba_inherit_owner:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Create files/directories with the owner of the parent directory.
+ aliases: [ sambaInheritOwner ]
+ samba_inherit_permissions:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Create files/directories with permissions of the parent directory.
+ aliases: [ sambaInheritPermissions ]
+ samba_invalid_users:
+ required: false
+ default: None
+ description:
+ - Invalid users or groups.
+ aliases: [ sambaInvalidUsers ]
+ samba_level_2_oplocks:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Level 2 oplocks.
+ aliases: [ sambaLevel2Oplocks ]
+ samba_locking:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Locking.
+ aliases: [ sambaLocking ]
+ samba_msdfs_root:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - MSDFS root.
+ aliases: [ sambaMSDFSRoot ]
+ samba_nt_acl_support:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - NT ACL support.
+ aliases: [ sambaNtAclSupport ]
+ samba_oplocks:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Oplocks.
+ aliases: [ sambaOplocks ]
+ samba_postexec:
+ required: false
+ default: None
+ description:
+ - Postexec script.
+ aliases: [ sambaPostexec ]
+ samba_preexec:
+ required: false
+ default: None
+ description:
+ - Preexec script.
+ aliases: [ sambaPreexec ]
+ samba_public:
+ required: false
+ default: '0'
+ choices: [ '0', '1' ]
+ description:
+ - Allow anonymous read-only access with a guest user.
+ aliases: [ sambaPublic ]
+ samba_security_mode:
+ required: false
+ default: '0777'
+ description:
+ - Security mode.
+ aliases: [ sambaSecurityMode ]
+ samba_strict_locking:
+ required: false
+ default: 'Auto'
+ description:
+ - Strict locking.
+ aliases: [ sambaStrictLocking ]
+ samba_vfs_objects:
+ required: false
+ default: None
+ description:
+ - VFS objects.
+ aliases: [ sambaVFSObjects ]
+ samba_valid_users:
+ required: false
+ default: None
+ description:
+ - Valid users or groups.
+ aliases: [ sambaValidUsers ]
+ samba_write_list:
+ required: false
+ default: None
+ description:
+ - Restrict write access to these users/groups.
+ aliases: [ sambaWriteList ]
+ samba_writeable:
+ required: false
+ default: '1'
+ choices: [ '0', '1' ]
+ description:
+ - Samba write access.
+ aliases: [ sambaWriteable ]
+ nfs_hosts:
+ required: false
+ default: []
+ description:
+ - Only allow access for this host, IP address or network.
+ nfs_custom_settings:
+ required: false
+ default: []
+ description:
+ - Option name in exports file.
+ aliases: [ nfsCustomSettings ]
+'''
+
+
+EXAMPLES = '''
+# Create a share named home on the server ucs.example.com with the path /home.
+- udm_share:
+ name: home
+ path: /home
+ host: ucs.example.com
+ sambaName: Home
+'''
+
+
+RETURN = '''# '''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True,
+ type='str'),
+ ou = dict(required=True,
+ type='str'),
+ owner = dict(type='str',
+ default='0'),
+ group = dict(type='str',
+ default='0'),
+ path = dict(type='path',
+ default=None),
+ directorymode = dict(type='str',
+ default='00755'),
+ host = dict(type='str',
+ default=None),
+ root_squash = dict(type='bool',
+ default=True),
+ subtree_checking = dict(type='bool',
+ default=True),
+ sync = dict(type='str',
+ default='sync'),
+ writeable = dict(type='bool',
+ default=True),
+ sambaBlockSize = dict(type='str',
+ aliases=['samba_block_size'],
+ default=None),
+ sambaBlockingLocks = dict(type='bool',
+ aliases=['samba_blocking_locks'],
+ default=True),
+ sambaBrowseable = dict(type='bool',
+                                               aliases=['samba_browseable'],
+ default=True),
+ sambaCreateMode = dict(type='str',
+ aliases=['samba_create_mode'],
+ default='0744'),
+ sambaCscPolicy = dict(type='str',
+ aliases=['samba_csc_policy'],
+ default='manual'),
+ sambaCustomSettings = dict(type='list',
+ aliases=['samba_custom_settings'],
+ default=[]),
+ sambaDirectoryMode = dict(type='str',
+ aliases=['samba_directory_mode'],
+ default='0755'),
+ sambaDirectorySecurityMode = dict(type='str',
+ aliases=['samba_directory_security_mode'],
+ default='0777'),
+ sambaDosFilemode = dict(type='bool',
+ aliases=['samba_dos_filemode'],
+ default=False),
+ sambaFakeOplocks = dict(type='bool',
+ aliases=['samba_fake_oplocks'],
+ default=False),
+ sambaForceCreateMode = dict(type='bool',
+ aliases=['samba_force_create_mode'],
+ default=False),
+ sambaForceDirectoryMode = dict(type='bool',
+ aliases=['samba_force_directory_mode'],
+ default=False),
+ sambaForceDirectorySecurityMode = dict(type='bool',
+ aliases=['samba_force_directory_security_mode'],
+ default=False),
+ sambaForceGroup = dict(type='str',
+ aliases=['samba_force_group'],
+ default=None),
+ sambaForceSecurityMode = dict(type='bool',
+ aliases=['samba_force_security_mode'],
+ default=False),
+ sambaForceUser = dict(type='str',
+ aliases=['samba_force_user'],
+ default=None),
+ sambaHideFiles = dict(type='str',
+ aliases=['samba_hide_files'],
+ default=None),
+ sambaHideUnreadable = dict(type='bool',
+ aliases=['samba_hide_unreadable'],
+ default=False),
+ sambaHostsAllow = dict(type='list',
+ aliases=['samba_hosts_allow'],
+ default=[]),
+ sambaHostsDeny = dict(type='list',
+ aliases=['samba_hosts_deny'],
+ default=[]),
+ sambaInheritAcls = dict(type='bool',
+ aliases=['samba_inherit_acls'],
+ default=True),
+ sambaInheritOwner = dict(type='bool',
+ aliases=['samba_inherit_owner'],
+ default=False),
+ sambaInheritPermissions = dict(type='bool',
+ aliases=['samba_inherit_permissions'],
+ default=False),
+ sambaInvalidUsers = dict(type='str',
+ aliases=['samba_invalid_users'],
+ default=None),
+ sambaLevel2Oplocks = dict(type='bool',
+ aliases=['samba_level_2_oplocks'],
+ default=True),
+ sambaLocking = dict(type='bool',
+ aliases=['samba_locking'],
+ default=True),
+ sambaMSDFSRoot = dict(type='bool',
+ aliases=['samba_msdfs_root'],
+ default=False),
+ sambaName = dict(type='str',
+ aliases=['samba_name'],
+ default=None),
+ sambaNtAclSupport = dict(type='bool',
+ aliases=['samba_nt_acl_support'],
+ default=True),
+ sambaOplocks = dict(type='bool',
+ aliases=['samba_oplocks'],
+ default=True),
+ sambaPostexec = dict(type='str',
+ aliases=['samba_postexec'],
+ default=None),
+ sambaPreexec = dict(type='str',
+ aliases=['samba_preexec'],
+ default=None),
+ sambaPublic = dict(type='bool',
+ aliases=['samba_public'],
+ default=False),
+ sambaSecurityMode = dict(type='str',
+ aliases=['samba_security_mode'],
+ default='0777'),
+ sambaStrictLocking = dict(type='str',
+ aliases=['samba_strict_locking'],
+ default='Auto'),
+ sambaVFSObjects = dict(type='str',
+ aliases=['samba_vfs_objects'],
+ default=None),
+ sambaValidUsers = dict(type='str',
+ aliases=['samba_valid_users'],
+ default=None),
+ sambaWriteList = dict(type='str',
+ aliases=['samba_write_list'],
+ default=None),
+ sambaWriteable = dict(type='bool',
+ aliases=['samba_writeable'],
+ default=True),
+ nfs_hosts = dict(type='list',
+ default=[]),
+ nfsCustomSettings = dict(type='list',
+ aliases=['nfs_custom_settings'],
+ default=[]),
+ state = dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if = ([
+ ('state', 'present', ['path', 'host', 'sambaName'])
+ ])
+ )
+ name = module.params['name']
+ state = module.params['state']
+    changed = False
+    diff = []
+
+ obj = list(ldap_search(
+ '(&(objectClass=univentionShare)(cn={}))'.format(name),
+ attr=['cn']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=shares,ou={},{}'.format(module.params['ou'], base_dn())
+ dn = 'cn={},{}'.format(name, container)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('shares/share', container)
+ else:
+ obj = umc_module_for_edit('shares/share', dn)
+
+ module.params['printablename'] = '{} ({})'.format(name, module.params['host'])
+ for k in obj.keys():
+ if module.params[k] is True:
+ module.params[k] = '1'
+ elif module.params[k] is False:
+ module.params[k] = '0'
+ obj[k] = module.params[k]
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except BaseException as err:
+ module.fail_json(
+ msg='Creating/editing share {} in {} failed: {}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('shares/share', dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except BaseException as err:
+ module.fail_json(
+ msg='Removing share {} in {} failed: {}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
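
Before copying parameters onto the share object, the loop above rewrites the booleans Ansible produced back into the '1'/'0' strings the UDM share handler stores; every other value passes through untouched. As a standalone sketch:

    # Sketch: map boolified share flags back to the '1'/'0' strings UDM stores.
    def normalize_flags(params):
        return dict((k, '1' if v is True else '0' if v is False else v)
                    for k, v in params.items())

    print(normalize_flags({'sambaBrowseable': True, 'sambaPublic': False,
                           'sambaCreateMode': '0744'}))
    # {'sambaBrowseable': '1', 'sambaPublic': '0', 'sambaCreateMode': '0744'}
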
diff --git a/univention/udm_user.py b/univention/udm_user.py
new file mode 100644
index 00000000000..ac2d8acb11e
--- /dev/null
+++ b/univention/udm_user.py
@@ -0,0 +1,598 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+
+from datetime import date
+import crypt
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+from dateutil.relativedelta import relativedelta
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: udm_user
+version_added: "2.2"
+author: "Tobias Rueetschi (@2-B)"
+short_description: Manage POSIX users on a Univention Corporate Server
+description:
+    - "This module allows managing POSIX users on a Univention Corporate
+       Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the user is present or not.
+ username:
+ required: true
+ description:
+ - User name
+ aliases: ['name']
+ firstname:
+ required: false
+ description:
+ - First name. Required if C(state=present).
+ lastname:
+ required: false
+ description:
+ - Last name. Required if C(state=present).
+ password:
+ required: false
+ default: None
+ description:
+ - Password. Required if C(state=present).
+ birthday:
+ required: false
+ default: None
+ description:
+ - Birthday
+ city:
+ required: false
+ default: None
+ description:
+      - City of the user's business address.
+ country:
+ required: false
+ default: None
+ description:
+      - Country of the user's business address.
+ department_number:
+ required: false
+ default: None
+ description:
+      - Department number of the user's business address.
+ aliases: [ departmentNumber ]
+ description:
+ required: false
+ default: None
+ description:
+ - Description (not gecos)
+ display_name:
+ required: false
+ default: None
+ description:
+ - Display name (not gecos)
+ aliases: [ displayName ]
+ email:
+ required: false
+ default: ['']
+ description:
+ - A list of e-mail addresses.
+ employee_number:
+ required: false
+ default: None
+ description:
+ - Employee number
+ aliases: [ employeeNumber ]
+ employee_type:
+ required: false
+ default: None
+ description:
+ - Employee type
+ aliases: [ employeeType ]
+ gecos:
+ required: false
+ default: None
+ description:
+ - GECOS
+ groups:
+ required: false
+ default: []
+ description:
+ - "POSIX groups, the LDAP DNs of the groups will be found with the
+ LDAP filter for each group as $GROUP:
+ C((&(objectClass=posixGroup)(cn=$GROUP)))."
+ home_share:
+ required: false
+ default: None
+ description:
+ - "Home NFS share. Must be a LDAP DN, e.g.
+ C(cn=home,cn=shares,ou=school,dc=example,dc=com)."
+ aliases: [ homeShare ]
+ home_share_path:
+ required: false
+ default: None
+ description:
+ - Path to home NFS share, inside the homeShare.
+ aliases: [ homeSharePath ]
+ home_telephone_number:
+ required: false
+ default: []
+ description:
+ - List of private telephone numbers.
+ aliases: [ homeTelephoneNumber ]
+ homedrive:
+ required: false
+ default: None
+ description:
+ - Windows home drive, e.g. C("H:").
+ mail_alternative_address:
+ required: false
+ default: []
+ description:
+ - List of alternative e-mail addresses.
+ aliases: [ mailAlternativeAddress ]
+ mail_home_server:
+ required: false
+ default: None
+ description:
+ - FQDN of mail server
+ aliases: [ mailHomeServer ]
+ mail_primary_address:
+ required: false
+ default: None
+ description:
+ - Primary e-mail address
+ aliases: [ mailPrimaryAddress ]
+ mobile_telephone_number:
+ required: false
+ default: []
+ description:
+ - Mobile phone number
+ aliases: [ mobileTelephoneNumber ]
+ organisation:
+ required: false
+ default: None
+ description:
+ - Organisation
+ override_pw_history:
+ required: false
+ default: False
+ description:
+ - Override password history
+ aliases: [ overridePWHistory ]
+ override_pw_length:
+ required: false
+ default: False
+ description:
+ - Override password check
+ aliases: [ overridePWLength ]
+ pager_telephonenumber:
+ required: false
+ default: []
+ description:
+ - List of pager telephone numbers.
+ aliases: [ pagerTelephonenumber ]
+ phone:
+ required: false
+ default: []
+ description:
+ - List of telephone numbers.
+ postcode:
+ required: false
+ default: None
+ description:
+      - Postal code of the user's business address.
+ primary_group:
+ required: false
+ default: cn=Domain Users,cn=groups,$LDAP_BASE_DN
+ description:
+ - Primary group. This must be the group LDAP DN.
+ aliases: [ primaryGroup ]
+ profilepath:
+ required: false
+ default: None
+ description:
+ - Windows profile directory
+ pwd_change_next_login:
+ required: false
+ default: None
+ choices: [ '0', '1' ]
+ description:
+ - Change password on next login.
+ aliases: [ pwdChangeNextLogin ]
+ room_number:
+ required: false
+ default: None
+ description:
+      - Room number of the user's business address.
+ aliases: [ roomNumber ]
+ samba_privileges:
+ required: false
+ default: []
+ description:
+ - "Samba privilege, like allow printer administration, do domain
+ join."
+ aliases: [ sambaPrivileges ]
+ samba_user_workstations:
+ required: false
+ default: []
+ description:
+ - Allow the authentication only on this Microsoft Windows host.
+ aliases: [ sambaUserWorkstations ]
+ sambahome:
+ required: false
+ default: None
+ description:
+ - Windows home path, e.g. C('\\\\$FQDN\\$USERNAME').
+ scriptpath:
+ required: false
+ default: None
+ description:
+ - Windows logon script.
+ secretary:
+ required: false
+ default: []
+ description:
+ - A list of superiors as LDAP DNs.
+ serviceprovider:
+ required: false
+ default: ['']
+ description:
+ - Enable user for the following service providers.
+ shell:
+ required: false
+ default: '/bin/bash'
+ description:
+ - Login shell
+ street:
+ required: false
+ default: None
+ description:
+      - Street of the user's business address.
+ title:
+ required: false
+ default: None
+ description:
+ - Title, e.g. C(Prof.).
+ unixhome:
+ required: false
+ default: '/home/$USERNAME'
+ description:
+ - Unix home directory
+ userexpiry:
+ required: false
+ default: Today + 1 year
+ description:
+ - Account expiry date, e.g. C(1999-12-31).
+ position:
+ required: false
+ default: ''
+ description:
+ - "Define the whole position of users object inside the LDAP tree,
+ e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)."
+ ou:
+ required: false
+ default: ''
+ description:
+ - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for
+ LDAP OU C(ou=school,dc=example,dc=com)."
+ subpath:
+ required: false
+ default: 'cn=users'
+ description:
+ - "LDAP subpath inside the organizational unit, e.g.
+ C(cn=teachers,cn=users) for LDAP container
+ C(cn=teachers,cn=users,dc=example,dc=com)."
+'''
+
+
+EXAMPLES = '''
+# Create a user on a UCS
+- udm_user:
+ name: FooBar
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+
+# Create a user with the DN
+# C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+- udm_user:
+ name: foo
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+ ou: school
+ subpath: 'cn=teachers,cn=users'
+# or define the position
+- udm_user:
+ name: foo
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+ position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com'
+'''
+
+
+RETURN = '''# '''
+
+
+def main():
+ expiry = date.strftime(date.today() + relativedelta(years=1), "%Y-%m-%d")
+ module = AnsibleModule(
+ argument_spec = dict(
+ birthday = dict(default=None,
+ type='str'),
+ city = dict(default=None,
+ type='str'),
+ country = dict(default=None,
+ type='str'),
+ department_number = dict(default=None,
+ type='str',
+ aliases=['departmentNumber']),
+ description = dict(default=None,
+ type='str'),
+ display_name = dict(default=None,
+ type='str',
+ aliases=['displayName']),
+ email = dict(default=[''],
+ type='list'),
+ employee_number = dict(default=None,
+ type='str',
+ aliases=['employeeNumber']),
+ employee_type = dict(default=None,
+ type='str',
+ aliases=['employeeType']),
+ firstname = dict(default=None,
+ type='str'),
+ gecos = dict(default=None,
+ type='str'),
+ groups = dict(default=[],
+ type='list'),
+ home_share = dict(default=None,
+ type='str',
+ aliases=['homeShare']),
+ home_share_path = dict(default=None,
+ type='str',
+ aliases=['homeSharePath']),
+ home_telephone_number = dict(default=[],
+ type='list',
+ aliases=['homeTelephoneNumber']),
+ homedrive = dict(default=None,
+ type='str'),
+ lastname = dict(default=None,
+ type='str'),
+ mail_alternative_address= dict(default=[],
+ type='list',
+ aliases=['mailAlternativeAddress']),
+ mail_home_server = dict(default=None,
+ type='str',
+ aliases=['mailHomeServer']),
+ mail_primary_address = dict(default=None,
+ type='str',
+ aliases=['mailPrimaryAddress']),
+ mobile_telephone_number = dict(default=[],
+ type='list',
+ aliases=['mobileTelephoneNumber']),
+ organisation = dict(default=None,
+ type='str'),
+ overridePWHistory = dict(default=False,
+ type='bool',
+ aliases=['override_pw_history']),
+ overridePWLength = dict(default=False,
+ type='bool',
+ aliases=['override_pw_length']),
+ pager_telephonenumber = dict(default=[],
+ type='list',
+ aliases=['pagerTelephonenumber']),
+ password = dict(default=None,
+ type='str',
+ no_log=True),
+ phone = dict(default=[],
+ type='list'),
+ postcode = dict(default=None,
+ type='str'),
+ primary_group = dict(default=None,
+ type='str',
+ aliases=['primaryGroup']),
+ profilepath = dict(default=None,
+ type='str'),
+ pwd_change_next_login = dict(default=None,
+ type='str',
+ choices=['0', '1'],
+ aliases=['pwdChangeNextLogin']),
+ room_number = dict(default=None,
+ type='str',
+ aliases=['roomNumber']),
+ samba_privileges = dict(default=[],
+ type='list',
+ aliases=['sambaPrivileges']),
+ samba_user_workstations = dict(default=[],
+ type='list',
+ aliases=['sambaUserWorkstations']),
+ sambahome = dict(default=None,
+ type='str'),
+ scriptpath = dict(default=None,
+ type='str'),
+ secretary = dict(default=[],
+ type='list'),
+ serviceprovider = dict(default=[''],
+ type='list'),
+ shell = dict(default='/bin/bash',
+ type='str'),
+ street = dict(default=None,
+ type='str'),
+ title = dict(default=None,
+ type='str'),
+ unixhome = dict(default=None,
+ type='str'),
+ userexpiry = dict(default=expiry,
+ type='str'),
+ username = dict(required=True,
+ aliases=['name'],
+ type='str'),
+ position = dict(default='',
+ type='str'),
+ ou = dict(default='',
+ type='str'),
+ subpath = dict(default='cn=users',
+ type='str'),
+ state = dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if = ([
+ ('state', 'present', ['firstname', 'lastname', 'password'])
+ ])
+ )
+ username = module.params['username']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+    changed = False
+    diff = []
+
+ users = list(ldap_search(
+ '(&(objectClass=posixAccount)(uid={}))'.format(username),
+ attr=['uid']
+ ))
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={},'.format(ou)
+ if subpath != '':
+ subpath = '{},'.format(subpath)
+ container = '{}{}{}'.format(subpath, ou, base_dn())
+ user_dn = 'uid={},{}'.format(username, container)
+
+ exists = bool(len(users))
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('users/user', container)
+ else:
+ obj = umc_module_for_edit('users/user', user_dn)
+
+ if module.params['displayName'] is None:
+ module.params['displayName'] = '{} {}'.format(
+ module.params['firstname'],
+ module.params['lastname']
+ )
+ if module.params['unixhome'] is None:
+ module.params['unixhome'] = '/home/{}'.format(
+ module.params['username']
+ )
+ for k in obj.keys():
+ if (k != 'password' and
+ k != 'groups' and
+ k != 'overridePWHistory' and
+ k in module.params and
+ module.params[k] is not None):
+ obj[k] = module.params[k]
+ # handle some special values
+ obj['e-mail'] = module.params['email']
+ password = module.params['password']
+ if obj['password'] is None:
+ obj['password'] = password
+ else:
+ old_password = obj['password'].split('}', 2)[1]
+ if crypt.crypt(password, old_password) != old_password:
+ obj['overridePWHistory'] = module.params['overridePWHistory']
+ obj['overridePWLength'] = module.params['overridePWLength']
+ obj['password'] = password
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except:
+ module.fail_json(
+ msg="Creating/editing user {} in {} failed".format(
+ username,
+ container
+ )
+ )
+ try:
+ groups = module.params['groups']
+ if groups:
+ filter = '(&(objectClass=posixGroup)(|(cn={})))'.format(
+ ')(cn='.join(groups)
+ )
+ group_dns = list(ldap_search(filter, attr=['dn']))
+ for dn in group_dns:
+ grp = umc_module_for_edit('groups/group', dn[0])
+ if user_dn not in grp['users']:
+ grp['users'].append(user_dn)
+ if not module.check_mode:
+ grp.modify()
+ changed = True
+ except:
+ module.fail_json(
+ msg="Adding groups to user {} failed".format(username)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('users/user', user_dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except:
+ module.fail_json(
+ msg="Removing user {} failed".format(username)
+ )
+
+ module.exit_json(
+ changed=changed,
+ username=username,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
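
The password handling in udm_user leans on crypt(3): a stored hash is also a valid salt, so re-hashing the candidate password with the stored hash reproduces it exactly when the password is unchanged. A sketch of that comparison; it is Unix-only (glibc crypt), and the '{crypt}' prefix mirrors the shape of the stored UDM value:

    # Sketch: idempotent password comparison via crypt(3).
    import crypt

    stored = '{crypt}' + crypt.crypt('secure_password', '$6$somesalt$')
    old_hash = stored.split('}', 2)[1]  # strip the '{crypt}' prefix

    print(crypt.crypt('secure_password', old_hash) == old_hash)  # True  -> no change
    print(crypt.crypt('new_password', old_hash) == old_hash)     # False -> update password
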
diff --git a/web_infrastructure/apache2_mod_proxy.py b/web_infrastructure/apache2_mod_proxy.py
new file mode 100644
index 00000000000..4d2f2c39a8f
--- /dev/null
+++ b/web_infrastructure/apache2_mod_proxy.py
@@ -0,0 +1,453 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Olivier Boukili
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: apache2_mod_proxy
+version_added: "2.2"
+short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
+description:
+ - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
+ pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
+ status page has to be enabled and accessible, as this module relies on parsing
+    this page. This module supports Ansible check mode and requires the
+    BeautifulSoup Python module.
+options:
+ balancer_url_suffix:
+ default: /balancer-manager/
+ description:
+ - Suffix of the balancer pool url required to access the balancer pool
+ status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
+ required: false
+ balancer_vhost:
+ default: None
+ description:
+ - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
+ required: true
+ member_host:
+ default: None
+ description:
+ - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
+ Port number is autodetected and should not be specified here.
+ If undefined, apache2_mod_proxy module will return a members list of
+ dictionaries of all the current balancer pool members' attributes.
+ required: false
+ state:
+ default: None
+ description:
+      - Desired state of the member host. The states C(drained), C(hot_standby)
+        and C(ignore_errors) can be combined, with each other or with C(absent) or
+        C(disabled), by separating them with a comma (e.g. C(state=drained,ignore_errors)).
+ required: false
+ choices: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]
+ tls:
+ default: false
+ description:
+ - Use https to access balancer management page.
+ choices: ["true", "false"]
+ validate_certs:
+ default: true
+ description:
+ - Validate ssl/tls certificates.
+ choices: ["true", "false"]
+'''
+
+EXAMPLES = '''
+# Get all current balancer pool members' attributes:
+- apache2_mod_proxy:
+ balancer_vhost: 10.0.0.2
+
+# Get a specific member's attributes:
+- apache2_mod_proxy:
+ balancer_vhost: myws.mydomain.org
+ balancer_suffix: /lb/
+ member_host: node1.myws.mydomain.org
+
+# Enable all balancer pool members:
+- apache2_mod_proxy:
+ balancer_vhost: '{{ myloadbalancer_host }}'
+ register: result
+- apache2_mod_proxy:
+ balancer_vhost: '{{ myloadbalancer_host }}'
+ member_host: '{{ item.host }}'
+ state: present
+ with_items: '{{ result.members }}'
+
+# Gracefully disable a member from a loadbalancer node:
+- apache2_mod_proxy:
+ balancer_vhost: '{{ vhost_host }}'
+ member_host: '{{ member.host }}'
+ state: drained
+ delegate_to: myloadbalancernode
+- wait_for:
+ host: '{{ member.host }}'
+ port: '{{ member.port }}'
+ state: drained
+ delegate_to: myloadbalancernode
+- apache2_mod_proxy:
+ balancer_vhost: '{{ vhost_host }}'
+ member_host: '{{ member.host }}'
+ state: absent
+ delegate_to: myloadbalancernode
+'''
+
+RETURN = '''
+member:
+ description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter.
+ type: dict
+ returned: success
+ sample:
+ {"attributes":
+ {"Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ }
+members:
+ description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
+ returned: success
+ type: list
+ sample:
+ [{"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ },
+ {"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.21",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false}
+ }
+ ]
+'''
+
+import re
+
+try:
+ from BeautifulSoup import BeautifulSoup
+except ImportError:
+ HAS_BEAUTIFULSOUP = False
+else:
+ HAS_BEAUTIFULSOUP = True
+
+# balancer member attributes extraction regexp:
+EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
+# Apache2 server version extraction regexp:
+APACHE_VERSION_EXPRESSION = r"Server Version: Apache/([\d.]+) \(([\w]+)\)"
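+# For illustration only (made-up values): applied to a management URL query
+# string such as 'b=mycluster&w=http://10.0.0.20:8080/ws&nonce=...',
+# EXPRESSION captures group 2 = 'mycluster' (balancer name),
+# group 3 = 'http' (protocol), group 4 = '10.0.0.20' (host),
+# group 5 = '8080' (port) and group 6 = '/ws' (path).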
+
+def regexp_extraction(string, _regexp, groups=1):
+ """ Returns the capture group (default=1) specified in the regexp, applied to the string """
+ regexp_search = re.search(string=str(string), pattern=str(_regexp))
+ if regexp_search:
+ if regexp_search.group(groups) != '':
+ return str(regexp_search.group(groups))
+ return None
+
+class BalancerMember(object):
+ """ Apache 2.4 mod_proxy LB balancer member.
+ attributes:
+ read-only:
+ host -> member host (string),
+ management_url -> member management url (string),
+ protocol -> member protocol (string)
+ port -> member port (string),
+ path -> member location (string),
+ balancer_url -> url of this member's parent balancer (string),
+ attributes -> whole member attributes (dictionary)
+ module -> ansible module instance (AnsibleModule object).
+ writable:
+ status -> status of the member (dictionary)
+ """
+
+ def __init__(self, management_url, balancer_url, module):
+ self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
+ self.management_url = str(management_url)
+ self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
+ self.port = regexp_extraction(management_url, EXPRESSION, 5)
+ self.path = regexp_extraction(management_url, EXPRESSION, 6)
+ self.balancer_url = str(balancer_url)
+ self.module = module
+
+ def get_member_attributes(self):
+ """ Returns a dictionary of a balancer member's attributes."""
+
+ balancer_member_page = fetch_url(self.module, self.management_url)
+
+ try:
+ assert balancer_member_page[1]['status'] == 200
+ except AssertionError:
+ self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + balancer_member_page[1])
+ else:
+ try:
+ soup = BeautifulSoup(balancer_member_page[0])
+ except TypeError:
+ self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(soup))
+ else:
+ subsoup = soup.findAll('table')[1].findAll('tr')
+ keys = subsoup[0].findAll('th')
+ for valuesset in subsoup[1::1]:
+ if re.search(pattern=self.host, string=str(valuesset)):
+ values = valuesset.findAll('td')
+ return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))
+
+ def get_member_status(self):
+ """ Returns a dictionary of a balancer member's status attributes."""
+ status_mapping = {'disabled':'Dis',
+ 'drained':'Drn',
+ 'hot_standby':'Stby',
+ 'ignore_errors':'Ign'}
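+        # Illustration: a 'Status' attribute of 'Init Drn Ok ' would yield
+        # {'disabled': False, 'drained': True, 'hot_standby': False, 'ignore_errors': False}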
+ status = {}
+ actual_status = str(self.attributes['Status'])
+ for mode in status_mapping.keys():
+ if re.search(pattern=status_mapping[mode], string=actual_status):
+ status[mode] = True
+ else:
+ status[mode] = False
+ return status
+
+ def set_member_status(self, values):
+ """ Sets a balancer member's status attributes amongst pre-mapped values."""
+ values_mapping = {'disabled':'&w_status_D',
+ 'drained':'&w_status_N',
+ 'hot_standby':'&w_status_H',
+ 'ignore_errors':'&w_status_I'}
+
+ request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
+ for k in values_mapping.keys():
+ if values[str(k)]:
+ request_body = request_body + str(values_mapping[k]) + '=1'
+ else:
+ request_body = request_body + str(values_mapping[k]) + '=0'
+
+ response = fetch_url(self.module, self.management_url, data=str(request_body))
+ try:
+ assert response[1]['status'] == 200
+ except AssertionError:
+ self.module.fail_json(msg="Could not set the member status! " + self.host + " " + response[1]['status'])
+
+ attributes = property(get_member_attributes)
+ status = property(get_member_status, set_member_status)
+
+
+class Balancer(object):
+ """ Apache httpd 2.4 mod_proxy balancer object"""
+ def __init__(self, host, suffix, module, members=None, tls=False):
+        if tls:
+            self.base_url = 'https://' + str(host)
+            self.url = 'https://' + str(host) + str(suffix)
+        else:
+            self.base_url = 'http://' + str(host)
+            self.url = 'http://' + str(host) + str(suffix)
+ self.module = module
+ self.page = self.fetch_balancer_page()
+ if members is None:
+ self._members = []
+
+ def fetch_balancer_page(self):
+ """ Returns the balancer management html page as a string for later parsing."""
+ page = fetch_url(self.module, str(self.url))
+ try:
+ assert page[1]['status'] == 200
+ except AssertionError:
+ self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
+ else:
+ content = page[0].read()
+ apache_version = regexp_extraction(content, APACHE_VERSION_EXPRESSION, 1)
+ if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
+ self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
+ return content
+
+ def get_balancer_members(self):
+ """ Returns members of the balancer as a generator object for later iteration."""
+ try:
+ soup = BeautifulSoup(self.page)
+ except TypeError:
+ self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
+ else:
+ for element in soup.findAll('a')[1::1]:
+ balancer_member_suffix = str(element.get('href'))
+ try:
+                assert balancer_member_suffix != ''
+ except AssertionError:
+ self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")
+ else:
+ yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)
+
+ members = property(get_balancer_members)
+
+def main():
+ """ Initiates module."""
+ module = AnsibleModule(
+ argument_spec=dict(
+            balancer_vhost=dict(required=True, type='str'),
+ balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
+ member_host=dict(type='str'),
+ state=dict(type='str'),
+ tls=dict(default=False, type='bool'),
+ validate_certs=dict(default=True, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ if HAS_BEAUTIFULSOUP is False:
+ module.fail_json(msg="python module 'BeautifulSoup' is required!")
+
+    if module.params['state'] is not None:
+ states = module.params['state'].split(',')
+ if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
+ module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
+ else:
+ for _state in states:
+ if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
+ module.fail_json(msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'.")
+ else:
+ states = ['None']
+
+ mybalancer = Balancer(module.params['balancer_vhost'],
+ module.params['balancer_url_suffix'],
+ module=module,
+ tls=module.params['tls'])
+
+ if module.params['member_host'] is None:
+ json_output_list = []
+ for member in mybalancer.members:
+ json_output_list.append({
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ })
+ module.exit_json(
+ changed=False,
+ members=json_output_list
+ )
+ else:
+ changed = False
+ member_exists = False
+ member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors':False}
+ for mode in member_status.keys():
+ for state in states:
+ if mode == state:
+ member_status[mode] = True
+ elif mode == 'disabled' and state == 'absent':
+ member_status[mode] = True
+
+ for member in mybalancer.members:
+ if str(member.host) == str(module.params['member_host']):
+ member_exists = True
+ if module.params['state'] is not None:
+ member_status_before = member.status
+ if not module.check_mode:
+ member_status_after = member.status = member_status
+ else:
+ member_status_after = member_status
+ if member_status_before != member_status_after:
+ changed = True
+ json_output = {
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ }
+ if member_exists:
+ module.exit_json(
+ changed=changed,
+ member=json_output
+ )
+ else:
+ module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+if __name__ == '__main__':
+ main()
diff --git a/web_infrastructure/deploy_helper.py b/web_infrastructure/deploy_helper.py
new file mode 100644
index 00000000000..a40abda2427
--- /dev/null
+++ b/web_infrastructure/deploy_helper.py
@@ -0,0 +1,534 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jasper N. Brouwer
+# (c) 2014, Ramon de la Fuente
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: deploy_helper
+version_added: "2.0"
+author: "Ramon de la Fuente (@ramondelafuente)"
+short_description: Manages some of the steps common in deploying projects.
+description:
+ - The Deploy Helper manages some of the steps common in deploying software.
+ It creates a folder structure, manages a symlink for the current release
+ and cleans up old releases.
+ - "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact.
+ C(project_path), whatever you set in the path parameter,
+ C(current_path), the path to the symlink that points to the active release,
+ C(releases_path), the path to the folder to keep releases in,
+ C(shared_path), the path to the folder to keep shared resources in,
+ C(unfinished_filename), the file to check for to recognize unfinished builds,
+ C(previous_release), the release the 'current' symlink is pointing to,
+ C(previous_release_path), the full path to the 'current' symlink target,
+ C(new_release), either the 'release' parameter or a generated timestamp,
+ C(new_release_path), the path to the new release folder (not created by the module)."
+
+options:
+ path:
+ required: True
+ aliases: ['dest']
+ description:
+ - the root path of the project. Alias I(dest).
+ Returned in the C(deploy_helper.project_path) fact.
+
+ state:
+ required: False
+ choices: [ present, finalize, absent, clean, query ]
+ default: present
+ description:
+ - the state of the project.
+ C(query) will only gather facts,
+ C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
+ C(finalize) will remove the unfinished_filename file, create a symlink to the newly
+ deployed release and optionally clean old releases,
+ C(clean) will remove failed & old releases,
+      C(absent) will remove the project folder (synonymous with the M(file) module with C(state=absent))
+
+ release:
+ required: False
+ default: None
+ description:
+      - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (e.g. '20141119223359').
+ This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
+ You can use the generated fact C(release={{ deploy_helper.new_release }}).
+
+ releases_path:
+ required: False
+ default: releases
+ description:
+ - the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
+ Returned in the C(deploy_helper.releases_path) fact.
+
+ shared_path:
+ required: False
+ default: shared
+ description:
+ - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
+ If this is set to an empty string, no shared folder will be created.
+ Returned in the C(deploy_helper.shared_path) fact.
+
+ current_path:
+ required: False
+ default: current
+ description:
+ - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
+ Returned in the C(deploy_helper.current_path) fact.
+
+ unfinished_filename:
+ required: False
+ default: DEPLOY_UNFINISHED
+ description:
+ - the name of the file that indicates a deploy has not finished. All folders in the releases_path that
+ contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
+ automatically deleted from the I(new_release_path) during C(state=finalize).
+
+ clean:
+ required: False
+ default: True
+ description:
+ - Whether to run the clean procedure in case of C(state=finalize).
+
+ keep_releases:
+ required: False
+ default: 5
+ description:
+ - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
+ will be deleted first, so only correct releases will count. The current version will not count.
+
+notes:
+ - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
+ parameters to both calls, otherwise the second call will overwrite the facts of the first one.
+ - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
+ new naming strategy without problems.
+ - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
+ unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
+ be much of a problem.
+'''
+
+EXAMPLES = '''
+
+# General explanation, starting with an example folder structure for a project:
+
+root:
+ releases:
+ - 20140415234508
+ - 20140415235146
+ - 20140416082818
+
+ shared:
+ - sessions
+ - uploads
+
+ current: -> releases/20140416082818
+
+
+The 'releases' folder holds all the available releases. A release is a complete build of the application being
+deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
+Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
+git tags or commit hashes.
+
+During a deploy, a new folder should be created in the releases folder and any build steps required should be
+performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
+with a link to this build.
+
+The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
+session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
+folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
+
+The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
+The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
+release is reduced to the time it takes to switch the link.
+
+To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
+that is currently in progress. The existence of this file marks the release as unfinished, and allows an
+automated procedure to remove it during cleanup.
+
+
+# Typical usage:
+- name: Initialize the deploy root and gather facts
+ deploy_helper:
+ path: /path/to/root
+- name: Clone the project to the new release folder
+ git:
+ repo: 'git://foosball.example.org/path/to/repo.git'
+ dest: '{{ deploy_helper.new_release_path }}'
+ version: 'v1.1.1'
+- name: Add an unfinished file, to allow cleanup on successful finalize
+ file:
+ path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
+ state: touch
+- name: Perform some build steps, like running your dependency manager for example
+ composer:
+ command: install
+ working_dir: '{{ deploy_helper.new_release_path }}'
+- name: Create some folders in the shared folder
+ file:
+ path: '{{ deploy_helper.shared_path }}/{{ item }}'
+ state: directory
+ with_items:
+ - sessions
+ - uploads
+- name: Add symlinks from the new release to the shared folder
+ file:
+ path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
+ src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
+ state: link
+ with_items:
+ - path: app/sessions
+ src: sessions
+ - path: web/uploads
+ src: uploads
+- name: Finalize the deploy, removing the unfinished file and switching the symlink
+ deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Retrieving facts before running a deploy
+- name: Run 'state=query' to gather facts without changing anything
+ deploy_helper:
+ path: /path/to/root
+ state: query
+# Remember to set the 'release' parameter when you actually call 'state=present' later
+- name: Initialize the deploy root
+ deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: present
+
+# all paths can be absolute or relative (to the 'path' parameter)
+- deploy_helper:
+ path: /path/to/root
+ releases_path: /var/www/project/releases
+ shared_path: /var/www/shared
+ current_path: /var/www/active
+
+# Using your own naming strategy for releases (a version tag in this case):
+- deploy_helper:
+ path: /path/to/root
+ release: 'v1.1.1'
+ state: present
+- deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Using a different unfinished_filename:
+- deploy_helper:
+ path: /path/to/root
+ unfinished_filename: README.md
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Postponing the cleanup of older builds:
+- deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ clean: False
+- deploy_helper:
+ path: /path/to/root
+ state: clean
+# Or running the cleanup ahead of the new deploy
+- deploy_helper:
+ path: /path/to/root
+ state: clean
+- deploy_helper:
+ path: /path/to/root
+ state: present
+
+# Keeping more old releases:
+- deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ keep_releases: 10
+# Or, if you use 'clean=false' on finalize:
+- deploy_helper:
+ path: /path/to/root
+ state: clean
+ keep_releases: 10
+
+# Removing the entire project root folder
+- deploy_helper:
+ path: /path/to/root
+ state: absent
+
+# Debugging the facts returned by the module
+- deploy_helper:
+ path: /path/to/root
+- debug:
+ var: deploy_helper
+'''
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+class DeployHelper(object):
+
+ def __init__(self, module):
+ module.params['path'] = os.path.expanduser(module.params['path'])
+
+ self.module = module
+ self.file_args = module.load_file_common_arguments(module.params)
+
+ self.clean = module.params['clean']
+ self.current_path = module.params['current_path']
+ self.keep_releases = module.params['keep_releases']
+ self.path = module.params['path']
+ self.release = module.params['release']
+ self.releases_path = module.params['releases_path']
+ self.shared_path = module.params['shared_path']
+ self.state = module.params['state']
+ self.unfinished_filename = module.params['unfinished_filename']
+
+ def gather_facts(self):
+ current_path = os.path.join(self.path, self.current_path)
+ releases_path = os.path.join(self.path, self.releases_path)
+ if self.shared_path:
+ shared_path = os.path.join(self.path, self.shared_path)
+ else:
+ shared_path = None
+
+ previous_release, previous_release_path = self._get_last_release(current_path)
+
+ if not self.release and (self.state == 'query' or self.state == 'present'):
+ self.release = time.strftime("%Y%m%d%H%M%S")
+
+ new_release_path = os.path.join(releases_path, self.release)
+
+ return {
+ 'project_path': self.path,
+ 'current_path': current_path,
+ 'releases_path': releases_path,
+ 'shared_path': shared_path,
+ 'previous_release': previous_release,
+ 'previous_release_path': previous_release_path,
+ 'new_release': self.release,
+ 'new_release_path': new_release_path,
+ 'unfinished_filename': self.unfinished_filename
+ }
+
+ def delete_path(self, path):
+ if not os.path.lexists(path):
+ return False
+
+ if not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ if not self.module.check_mode:
+ try:
+ shutil.rmtree(path, ignore_errors=False)
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg="rmtree failed: %s" % str(e))
+
+ return True
+
+ def create_path(self, path):
+ changed = False
+
+ if not os.path.lexists(path):
+ changed = True
+ if not self.module.check_mode:
+ os.makedirs(path)
+
+ elif not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+        changed = self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
+
+ return changed
+
+ def check_link(self, path):
+ if os.path.lexists(path):
+ if not os.path.islink(path):
+ self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
+
+ def create_link(self, source, link_name):
+ changed = False
+
+ if os.path.islink(link_name):
+ norm_link = os.path.normpath(os.path.realpath(link_name))
+ norm_source = os.path.normpath(os.path.realpath(source))
+ if norm_link == norm_source:
+ changed = False
+ else:
+ changed = True
+ if not self.module.check_mode:
+ if not os.path.lexists(source):
+ self.module.fail_json(msg="the symlink target %s doesn't exists" % source)
+ tmp_link_name = link_name + '.' + self.unfinished_filename
+ if os.path.islink(tmp_link_name):
+ os.unlink(tmp_link_name)
+ os.symlink(source, tmp_link_name)
+ os.rename(tmp_link_name, link_name)
+ else:
+ changed = True
+ if not self.module.check_mode:
+ os.symlink(source, link_name)
+
+ return changed
+
+ def remove_unfinished_file(self, new_release_path):
+ changed = False
+ unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
+ if os.path.lexists(unfinished_file_path):
+ changed = True
+ if not self.module.check_mode:
+ os.remove(unfinished_file_path)
+
+ return changed
+
+ def remove_unfinished_builds(self, releases_path):
+ changes = 0
+
+ for release in os.listdir(releases_path):
+ if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
+ if self.module.check_mode:
+ changes += 1
+ else:
+ changes += self.delete_path(os.path.join(releases_path, release))
+
+ return changes
+
+ def remove_unfinished_link(self, path):
+ changed = False
+
+ tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
+        if os.path.exists(tmp_link_name):
+            changed = True
+            if not self.module.check_mode:
+                os.remove(tmp_link_name)
+
+ return changed
+
+ def cleanup(self, releases_path, reserve_version):
+ changes = 0
+
+ if os.path.lexists(releases_path):
+ releases = [ f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path,f)) ]
+ try:
+ releases.remove(reserve_version)
+ except ValueError:
+ pass
+
+ if not self.module.check_mode:
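+                # Sort newest first; everything beyond keep_releases is deleted.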
+ releases.sort( key=lambda x: os.path.getctime(os.path.join(releases_path,x)), reverse=True)
+ for release in releases[self.keep_releases:]:
+ changes += self.delete_path(os.path.join(releases_path, release))
+ elif len(releases) > self.keep_releases:
+ changes += (len(releases) - self.keep_releases)
+
+ return changes
+
+ def _get_file_args(self, path):
+ file_args = self.file_args.copy()
+ file_args['path'] = path
+ return file_args
+
+ def _get_last_release(self, current_path):
+ previous_release = None
+ previous_release_path = None
+
+ if os.path.lexists(current_path):
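+            # 'current' is a symlink to the active release; realpath()
+            # resolves it to the release directory itself.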
+ previous_release_path = os.path.realpath(current_path)
+ previous_release = os.path.basename(previous_release_path)
+
+ return previous_release, previous_release_path
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ path = dict(aliases=['dest'], required=True, type='str'),
+ release = dict(required=False, type='str', default=None),
+ releases_path = dict(required=False, type='str', default='releases'),
+ shared_path = dict(required=False, type='str', default='shared'),
+ current_path = dict(required=False, type='str', default='current'),
+ keep_releases = dict(required=False, type='int', default=5),
+ clean = dict(required=False, type='bool', default=True),
+ unfinished_filename = dict(required=False, type='str', default='DEPLOY_UNFINISHED'),
+ state = dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
+ ),
+ add_file_common_args = True,
+ supports_check_mode = True
+ )
+
+ deploy_helper = DeployHelper(module)
+ facts = deploy_helper.gather_facts()
+
+ result = {
+ 'state': deploy_helper.state
+ }
+
+ changes = 0
+
+ if deploy_helper.state == 'query':
+ result['ansible_facts'] = { 'deploy_helper': facts }
+
+ elif deploy_helper.state == 'present':
+ deploy_helper.check_link(facts['current_path'])
+ changes += deploy_helper.create_path(facts['project_path'])
+ changes += deploy_helper.create_path(facts['releases_path'])
+ if deploy_helper.shared_path:
+ changes += deploy_helper.create_path(facts['shared_path'])
+
+ result['ansible_facts'] = { 'deploy_helper': facts }
+
+ elif deploy_helper.state == 'finalize':
+ if not deploy_helper.release:
+ module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)")
+ if deploy_helper.keep_releases <= 0:
+ module.fail_json(msg="'keep_releases' should be at least 1")
+
+ changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
+ changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
+ if deploy_helper.clean:
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'clean':
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'absent':
+ # destroy the facts
+ result['ansible_facts'] = { 'deploy_helper': [] }
+ changes += deploy_helper.delete_path(facts['project_path'])
+
+ if changes > 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/web_infrastructure/ejabberd_user.py b/web_infrastructure/ejabberd_user.py
index bf86806ad52..84a8dadbf63 100644
--- a/web_infrastructure/ejabberd_user.py
+++ b/web_infrastructure/ejabberd_user.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ejabberd_user
@@ -59,15 +63,22 @@
EXAMPLES = '''
Example playbook entries using the ejabberd_user module to manage users state.
- tasks:
-
- - name: create a user if it does not exists
- action: ejabberd_user username=test host=server password=password
-
- - name: delete a user if it exists
- action: ejabberd_user username=test host=server state=absent
+- name: create a user if it does not exist
+ ejabberd_user:
+ username: test
+ host: server
+ password: password
+
+- name: delete a user if it exists
+ ejabberd_user:
+ username: test
+ host: server
+ state: absent
'''
+
import syslog
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.basic import *
class EjabberdUserException(Exception):
""" Base exeption for EjabberdUser class object """
@@ -98,7 +109,8 @@ def changed(self):
try:
options = [self.user, self.host, self.pwd]
(rc, out, err) = self.run_command('check_password', options)
- except EjabberdUserException, e:
+ except EjabberdUserException:
+ e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return rc
@@ -111,14 +123,15 @@ def exists(self):
try:
options = [self.user, self.host]
(rc, out, err) = self.run_command('check_account', options)
- except EjabberdUserException, e:
+ except EjabberdUserException:
+ e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return not bool(int(rc))
def log(self, entry):
""" This method will log information to the local syslog facility """
if self.logging:
- syslog.openlog('ansible-%s' % os.path.basename(__file__))
+ syslog.openlog('ansible-%s' % self.module._name)
syslog.syslog(syslog.LOG_NOTICE, entry)
def run_command(self, cmd, options):
@@ -139,7 +152,8 @@ def update(self):
try:
options = [self.user, self.host, self.pwd]
(rc, out, err) = self.run_command('change_password', options)
- except EjabberdUserException, e:
+ except EjabberdUserException:
+ e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return (rc, out, err)
@@ -150,7 +164,8 @@ def create(self):
try:
options = [self.user, self.host, self.pwd]
(rc, out, err) = self.run_command('register', options)
- except EjabberdUserException, e:
+ except EjabberdUserException:
+ e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return (rc, out, err)
@@ -160,7 +175,8 @@ def delete(self):
try:
options = [self.user, self.host]
(rc, out, err) = self.run_command('unregister', options)
- except EjabberdUserException, e:
+ except EjabberdUserException:
+ e = get_exception()
(rc, out, err) = (1, None, "required attribute(s) missing")
return (rc, out, err)
@@ -209,6 +225,5 @@ def main():
module.exit_json(**result)
-# import module snippets
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/web_infrastructure/jboss.py b/web_infrastructure/jboss.py
index 9ec67b7c7b1..738b536782d 100644
--- a/web_infrastructure/jboss.py
+++ b/web_infrastructure/jboss.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
module: jboss
version_added: "1.4"
@@ -52,11 +56,21 @@
EXAMPLES = """
# Deploy a hello world application
-- jboss: src=/tmp/hello-1.0-SNAPSHOT.war deployment=hello.war state=present
+- jboss:
+ src: /tmp/hello-1.0-SNAPSHOT.war
+ deployment: hello.war
+ state: present
+
# Update the hello world application
-- jboss: src=/tmp/hello-1.1-SNAPSHOT.war deployment=hello.war state=present
+- jboss:
+ src: /tmp/hello-1.1-SNAPSHOT.war
+ deployment: hello.war
+ state: present
+
# Undeploy the hello world application
-- jboss: deployment=hello.war state=absent
+- jboss:
+ deployment: hello.war
+ state: absent
"""
import os
@@ -137,4 +151,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/web_infrastructure/jenkins_job.py b/web_infrastructure/jenkins_job.py
new file mode 100644
index 00000000000..0c91c8b876e
--- /dev/null
+++ b/web_infrastructure/jenkins_job.py
@@ -0,0 +1,362 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: jenkins_job
+short_description: Manage jenkins jobs
+description:
+ - Manage Jenkins jobs by using Jenkins REST API.
+requirements:
+ - "python-jenkins >= 0.4.12"
+ - "lxml >= 3.3.3"
+version_added: "2.2"
+author: "Sergio Millan Rodriguez (@sermilrod)"
+options:
+ config:
+ description:
+      - Config in XML format.
+      - Required if the job does not yet exist.
+      - Mutually exclusive with C(enabled).
+      - Considered if C(state=present).
+ required: false
+ enabled:
+ description:
+ - Whether the job should be enabled or disabled.
+      - Mutually exclusive with C(config).
+ - Considered if C(state=present).
+ required: false
+ name:
+ description:
+ - Name of the Jenkins job.
+ required: true
+ password:
+ description:
+ - Password to authenticate with the Jenkins server.
+ required: false
+ state:
+ description:
+ - Attribute that specifies if the job has to be created or deleted.
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ token:
+ description:
+      - API token used to authenticate with the Jenkins server, as an alternative to I(password).
+ required: false
+ url:
+ description:
+      - URL where the Jenkins server is accessible.
+ required: false
+ default: http://localhost:8080
+ user:
+ description:
+ - User to authenticate with the Jenkins server.
+ required: false
+'''
+
+EXAMPLES = '''
+# Create a jenkins job using basic authentication
+- jenkins_job:
+ config: "{{ lookup('file', 'templates/test.xml') }}"
+ name: test
+ password: admin
+ url: "http://localhost:8080"
+ user: admin
+
+# Create a jenkins job using the token
+- jenkins_job:
+ config: "{{ lookup('template', 'templates/test.xml.j2') }}"
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ url: "http://localhost:8080"
+ user: admin
+
+# Delete a jenkins job using basic authentication
+- jenkins_job:
+ name: test
+ password: admin
+ state: absent
+ url: "http://localhost:8080"
+ user: admin
+
+# Delete a jenkins job using the token
+- jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ state: absent
+ url: "http://localhost:8080"
+ user: admin
+
+# Disable a jenkins job using basic authentication
+- jenkins_job:
+ name: test
+ password: admin
+ enabled: false
+ url: "http://localhost:8080"
+ user: admin
+
+# Disable a jenkins job using the token
+- jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ enabled: false
+ url: "http://localhost:8080"
+ user: admin
+'''
+
+RETURN = '''
+---
+name:
+ description: Name of the jenkins job.
+ returned: success
+ type: string
+ sample: test-job
+state:
+ description: State of the jenkins job.
+ returned: success
+ type: string
+ sample: present
+enabled:
+ description: Whether the jenkins job is enabled or not.
+ returned: success
+ type: bool
+ sample: true
+user:
+ description: User used for authentication.
+ returned: success
+ type: string
+ sample: admin
+url:
+  description: URL used to connect to the Jenkins server.
+ returned: success
+ type: string
+ sample: https://jenkins.mydomain.com
+'''
+
+try:
+ import jenkins
+ python_jenkins_installed = True
+except ImportError:
+ python_jenkins_installed = False
+
+try:
+ from lxml import etree as ET
+ python_lxml_installed = True
+except ImportError:
+ python_lxml_installed = False
+
+class JenkinsJob:
+ def __init__(self, module):
+ self.module = module
+
+ self.config = module.params.get('config')
+ self.name = module.params.get('name')
+ self.password = module.params.get('password')
+ self.state = module.params.get('state')
+ self.enabled = module.params.get('enabled')
+ self.token = module.params.get('token')
+ self.user = module.params.get('user')
+ self.jenkins_url = module.params.get('url')
+ self.server = self.get_jenkins_connection()
+
+ self.result = {
+ 'changed': False,
+ 'url': self.jenkins_url,
+ 'name': self.name,
+ 'user': self.user,
+ 'state': self.state,
+ 'diff': {
+ 'before': "",
+ 'after': ""
+ }
+ }
+
+ def get_jenkins_connection(self):
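+        # Choose the authentication method from the supplied credentials:
+        # user+password, then user+token, then user only, else anonymous.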
+ try:
+ if (self.user and self.password):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
+ elif (self.user and self.token):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
+ elif (self.user and not (self.password or self.token)):
+ return jenkins.Jenkins(self.jenkins_url, self.user)
+ else:
+ return jenkins.Jenkins(self.jenkins_url)
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % str(e))
+
+ def get_job_status(self):
+ try:
+ return self.server.get_job_info(self.name)['color'].encode('utf-8')
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg='Unable to fetch job information, %s' % str(e))
+
+ def job_exists(self):
+ try:
+ return bool(self.server.job_exists(self.name))
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg='Unable to validate if job exists, %s for %s' % (str(e), self.jenkins_url))
+
+ def get_config(self):
+ return job_config_to_string(self.config)
+
+ def get_current_config(self):
+ return job_config_to_string(self.server.get_job_config(self.name).encode('utf-8'))
+
+ def has_config_changed(self):
+ # config is optional, if not provided we keep the current config as is
+ if self.config is None:
+ return False
+
+ config_file = self.get_config()
+ machine_file = self.get_current_config()
+
+ self.result['diff']['after'] = config_file
+ self.result['diff']['before'] = machine_file
+
+ if machine_file != config_file:
+ return True
+ return False
+
+ def present_job(self):
+ if self.config is None and self.enabled is None:
+ self.module.fail_json(msg='one of the following params is required on state=present: config,enabled')
+
+ if not self.job_exists():
+ self.create_job()
+ else:
+ self.update_job()
+
+ def has_state_changed(self, status):
+ # Keep in current state if enabled arg_spec is not given
+ if self.enabled is None:
+ return False
+
+        if (self.enabled is False and status != "disabled") or (self.enabled is True and status == "disabled"):
+ return True
+ return False
+
+ def switch_state(self):
+        if self.enabled is False:
+ self.server.disable_job(self.name)
+ else:
+ self.server.enable_job(self.name)
+
+ def update_job(self):
+ try:
+ status = self.get_job_status()
+
+ # Handle job config
+ if self.has_config_changed():
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.server.reconfig_job(self.name, self.get_config())
+
+ # Handle job disable/enable
+ elif self.has_state_changed(status):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.switch_state()
+
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg='Unable to reconfigure job, %s for %s' % (str(e), self.jenkins_url))
+
+ def create_job(self):
+ if self.config is None:
+ self.module.fail_json(msg='missing required param: config')
+
+ self.result['changed'] = True
+ try:
+ config_file = self.get_config()
+ self.result['diff']['after'] = config_file
+ if not self.module.check_mode:
+ self.server.create_job(self.name, config_file)
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg='Unable to create job, %s for %s' % (str(e), self.jenkins_url))
+
+ def absent_job(self):
+ if self.job_exists():
+ self.result['changed'] = True
+ self.result['diff']['before'] = self.get_current_config()
+ if not self.module.check_mode:
+ try:
+ self.server.delete_job(self.name)
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg='Unable to delete job, %s for %s' % (str(e), self.jenkins_url))
+
+ def get_result(self):
+ result = self.result
+ if self.job_exists():
+ result['enabled'] = self.get_job_status() != "disabled"
+ else:
+ result['enabled'] = None
+ return result
+
+def test_dependencies(module):
+ if not python_jenkins_installed:
+ module.fail_json(msg="python-jenkins required for this module. "\
+ "see http://python-jenkins.readthedocs.io/en/latest/install.html")
+
+ if not python_lxml_installed:
+ module.fail_json(msg="lxml required for this module. "\
+ "see http://lxml.de/installation.html")
+
+def job_config_to_string(xml_str):
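+    # Parse and re-serialise the XML so that semantically identical configs
+    # compare equal as strings despite formatting differences.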
+ return ET.tostring(ET.fromstring(xml_str))
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ config = dict(required=False),
+ name = dict(required=True),
+ password = dict(required=False, no_log=True),
+ state = dict(required=False, choices=['present', 'absent'], default="present"),
+ enabled = dict(required=False, type='bool'),
+ token = dict(required=False, no_log=True),
+ url = dict(required=False, default="http://localhost:8080"),
+ user = dict(required=False)
+ ),
+ mutually_exclusive = [
+ ['password', 'token'],
+ ['config', 'enabled'],
+ ],
+ supports_check_mode=True,
+ )
+
+ test_dependencies(module)
+ jenkins_job = JenkinsJob(module)
+
+ if module.params.get('state') == "present":
+ jenkins_job.present_job()
+ else:
+ jenkins_job.absent_job()
+
+ result = jenkins_job.get_result()
+ module.exit_json(**result)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+if __name__ == '__main__':
+ main()
diff --git a/web_infrastructure/jenkins_plugin.py b/web_infrastructure/jenkins_plugin.py
new file mode 100644
index 00000000000..56067c38a60
--- /dev/null
+++ b/web_infrastructure/jenkins_plugin.py
@@ -0,0 +1,833 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2016, Jiri Tyr
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.urls import url_argument_spec
+import base64
+import hashlib
+import json
+import os
+import tempfile
+import time
+import urllib
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: jenkins_plugin
+author: Jiri Tyr (@jtyr)
+version_added: '2.2'
+short_description: Add or remove Jenkins plugin
+description:
+ - Ansible module which helps to manage Jenkins plugins.
+
+options:
+ group:
+ required: false
+ default: jenkins
+ description:
+ - Name of the Jenkins group on the OS.
+ jenkins_home:
+ required: false
+ default: /var/lib/jenkins
+ description:
+ - Home directory of the Jenkins user.
+ mode:
+ required: false
+ default: '0664'
+ description:
+ - File mode applied on versioned plugins.
+ name:
+ required: true
+ description:
+ - Plugin name.
+ owner:
+ required: false
+ default: jenkins
+ description:
+ - Name of the Jenkins user on the OS.
+ params:
+ required: false
+ default: null
+ description:
+ - Option used to allow the user to overwrite any of the other options. To
+ remove an option, set the value of the option to C(null).
+ state:
+ required: false
+ choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
+ default: present
+ description:
+ - Desired plugin state.
+ - If the C(latest) is set, the check for new version will be performed
+ every time. This is suitable to keep the plugin up-to-date.
+ timeout:
+ required: false
+ default: 30
+ description:
+      - Server connection timeout in seconds.
+ updates_expiration:
+ required: false
+ default: 86400
+ description:
+ - Number of seconds after which a new copy of the I(update-center.json)
+ file is downloaded. This is used to avoid the need to download the
+ plugin to calculate its checksum when C(latest) is specified.
+ - Set it to C(0) if no cache file should be used. In that case, the
+ plugin file will always be downloaded to calculate its checksum when
+ C(latest) is specified.
+ updates_url:
+ required: false
+ default: https://updates.jenkins-ci.org
+ description:
+ - URL of the Update Centre.
+ - Used as the base URL to download the plugins and the
+ I(update-center.json) JSON file.
+ url:
+ required: false
+ default: http://localhost:8080
+ description:
+ - URL of the Jenkins server.
+ version:
+ required: false
+ default: null
+ description:
+ - Plugin version number.
+ - If this option is specified, all plugin dependencies must be installed
+ manually.
+ - It might take longer to verify that the correct version is installed.
+ This is especially true if a specific version number is specified.
+ with_dependencies:
+ required: false
+ choices: ['yes', 'no']
+ default: 'yes'
+ description:
+ - Defines whether to install plugin dependencies.
+
+notes:
+  - Plugin installation should be run under root or the same user which owns
+    the plugin files on the disk. Only if the plugin is not installed yet and
+    no version is specified is the installation performed via the API, which
+    requires only the Web UI credentials.
+ - It's necessary to notify the handler or call the I(service) module to
+ restart the Jenkins service after a new plugin was installed.
+  - Pinning works only if the plugin is installed and the Jenkins service was
+    successfully restarted after the plugin installation.
+ - It is not possible to run the module remotely by changing the I(url)
+ parameter to point to the Jenkins server. The module must be used on the
+ host where Jenkins runs as it needs direct access to the plugin files.
+'''
+
+EXAMPLES = '''
+- name: Install plugin
+ jenkins_plugin:
+ name: build-pipeline-plugin
+
+- name: Install plugin without its dependencies
+ jenkins_plugin:
+ name: build-pipeline-plugin
+ with_dependencies: no
+
+- name: Make sure the plugin is always up-to-date
+ jenkins_plugin:
+ name: token-macro
+ state: latest
+
+- name: Install specific version of the plugin
+ jenkins_plugin:
+ name: token-macro
+ version: 1.15
+
+- name: Pin the plugin
+ jenkins_plugin:
+ name: token-macro
+ state: pinned
+
+- name: Unpin the plugin
+ jenkins_plugin:
+ name: token-macro
+ state: unpinned
+
+- name: Enable the plugin
+ jenkins_plugin:
+ name: token-macro
+ state: enabled
+
+- name: Disable the plugin
+ jenkins_plugin:
+ name: token-macro
+ state: disabled
+
+- name: Uninstall plugin
+ jenkins_plugin:
+ name: build-pipeline-plugin
+ state: absent
+
+#
+# Example of how to use the params
+#
+# Define a variable and specify all default parameters you want to use across
+# all jenkins_plugin calls:
+#
+# my_jenkins_params:
+# url_username: admin
+# url_password: p4ssw0rd
+# url: http://localhost:8888
+#
+- name: Install plugin
+ jenkins_plugin:
+ name: build-pipeline-plugin
+ params: "{{ my_jenkins_params }}"
+
+#
+# Example of a Play which handles Jenkins restarts during the state changes
+#
+- name: Jenkins Master play
+ hosts: jenkins-master
+ vars:
+ my_jenkins_plugins:
+ token-macro:
+ enabled: yes
+ build-pipeline-plugin:
+ version: 1.4.9
+ pinned: no
+ enabled: yes
+ tasks:
+ - name: Install plugins without a specific version
+ jenkins_plugin:
+ name: "{{ item.key }}"
+ register: my_jenkins_plugin_unversioned
+ when: >
+ 'version' not in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Install plugins with a specific version
+ jenkins_plugin:
+ name: "{{ item.key }}"
+ version: "{{ item.value['version'] }}"
+ register: my_jenkins_plugin_versioned
+ when: >
+ 'version' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Initiate the fact
+ set_fact:
+ jenkins_restart_required: no
+
+ - name: Check if restart is required by any of the versioned plugins
+ set_fact:
+ jenkins_restart_required: yes
+ when: item.changed
+ with_items: "{{ my_jenkins_plugin_versioned.results }}"
+
+ - name: Check if restart is required by any of the unversioned plugins
+ set_fact:
+ jenkins_restart_required: yes
+ when: item.changed
+ with_items: "{{ my_jenkins_plugin_unversioned.results }}"
+
+ - name: Restart Jenkins if required
+ service:
+ name: jenkins
+ state: restarted
+ when: jenkins_restart_required
+
+ - name: Wait for Jenkins to start up
+ uri:
+ url: http://localhost:8080
+ status_code: 200
+ timeout: 5
+ register: jenkins_service_status
+ # Keep trying for 5 mins in 5 sec intervals
+ retries: 60
+ delay: 5
+ until: >
+ 'status' in jenkins_service_status and
+ jenkins_service_status['status'] == 200
+ when: jenkins_restart_required
+
+ - name: Reset the fact
+ set_fact:
+ jenkins_restart_required: no
+ when: jenkins_restart_required
+
+ - name: Plugin pinning
+ jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
+ when: >
+ 'pinned' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Plugin enabling
+ jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
+ when: >
+ 'enabled' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+'''
+
+RETURN = '''
+plugin:
+ description: plugin name
+ returned: success
+ type: string
+ sample: build-pipeline-plugin
+state:
+ description: state of the target, after execution
+ returned: success
+ type: string
+ sample: "present"
+'''
+
+
+class JenkinsPlugin(object):
+ def __init__(self, module):
+ # To be able to call fail_json
+ self.module = module
+
+ # Shortcuts for the params
+ self.params = self.module.params
+ self.url = self.params['url']
+ self.timeout = self.params['timeout']
+
+ # Crumb
+ self.crumb = {}
+
+ if self._csrf_enabled():
+ self.crumb = self._get_crumb()
+
+ # Get list of installed plugins
+ self._get_installed_plugins()
+
+ def _csrf_enabled(self):
+ csrf_data = self._get_json_data(
+ "%s/%s" % (self.url, "api/json"), 'CSRF')
+
+ return csrf_data["useCrumbs"]
+
+ def _get_json_data(self, url, what, **kwargs):
+ # Get the JSON data
+ r = self._get_url_data(url, what, **kwargs)
+
+ # Parse the JSON data
+ try:
+ json_data = json.load(r)
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot parse %s JSON data." % what,
+ details=e.message)
+
+ return json_data
+
+ def _get_url_data(
+ self, url, what=None, msg_status=None, msg_exception=None,
+ **kwargs):
+ # Compose default messages
+ if msg_status is None:
+ msg_status = "Cannot get %s" % what
+
+ if msg_exception is None:
+ msg_exception = "Retrieval of %s failed." % what
+
+ # Get the URL data
+ try:
+ response, info = fetch_url(
+ self.module, url, timeout=self.timeout, **kwargs)
+
+ if info['status'] != 200:
+ self.module.fail_json(msg=msg_status, details=info['msg'])
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(msg=msg_exception, details=e.message)
+
+ return response
+
+ def _get_crumb(self):
+ crumb_data = self._get_json_data(
+ "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
+
+ if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
+ ret = {
+ crumb_data['crumbRequestField']: crumb_data['crumb']
+ }
+ else:
+ self.module.fail_json(
+ msg="Required fields not found in the Crum response.",
+ details=crumb_data)
+
+ return ret
+
+ def _get_installed_plugins(self):
+ plugins_data = self._get_json_data(
+ "%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
+ 'list of plugins')
+
+ # Check if we got valid data
+ if 'plugins' not in plugins_data:
+ self.module.fail_json(msg="No valid plugin data found.")
+
+        # Determine whether the plugin is installed, pinned and enabled
+ self.is_installed = False
+ self.is_pinned = False
+ self.is_enabled = False
+
+ for p in plugins_data['plugins']:
+ if p['shortName'] == self.params['name']:
+ self.is_installed = True
+
+ if p['pinned']:
+ self.is_pinned = True
+
+ if p['enabled']:
+ self.is_enabled = True
+
+ break
+
+ def install(self):
+ changed = False
+ plugin_file = (
+ '%s/plugins/%s.jpi' % (
+ self.params['jenkins_home'],
+ self.params['name']))
+
+ if not self.is_installed and self.params['version'] is None:
+ if not self.module.check_mode:
+ # Install the plugin (with dependencies)
+ install_script = (
+ 'd = Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.deploy(); d.get();' % self.params['name'])
+
+ if self.params['with_dependencies']:
+ install_script = (
+ 'Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.getNeededDependencies().each{it.deploy()}; %s' % (
+ self.params['name'], install_script))
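+                # The Groovy snippet runs on the Jenkins master via the
+                # /scriptText endpoint; d.get() blocks until the deployment
+                # Future completes.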
+
+ script_data = {
+ 'script': install_script
+ }
+ script_data.update(self.crumb)
+ data = urllib.urlencode(script_data)
+
+ # Send the installation request
+ r = self._get_url_data(
+ "%s/scriptText" % self.url,
+ msg_status="Cannot install plugin.",
+ msg_exception="Plugin installation has failed.",
+ data=data)
+
+ changed = True
+ else:
+ # Check if the plugin directory exists
+ if not os.path.isdir(self.params['jenkins_home']):
+ self.module.fail_json(
+ msg="Jenkins home directory doesn't exist.")
+
+ md5sum_old = None
+ if os.path.isfile(plugin_file):
+ # Make the checksum of the currently installed plugin
+ md5sum_old = hashlib.md5(
+ open(plugin_file, 'rb').read()).hexdigest()
+
+ if self.params['version'] in [None, 'latest']:
+ # Take latest version
+ plugin_url = (
+ "%s/latest/%s.hpi" % (
+ self.params['updates_url'],
+ self.params['name']))
+ else:
+ # Take specific version
+ plugin_url = (
+ "{0}/download/plugins/"
+ "{1}/{2}/{1}.hpi".format(
+ self.params['updates_url'],
+ self.params['name'],
+ self.params['version']))
+
+ if (
+ self.params['updates_expiration'] == 0 or
+ self.params['version'] not in [None, 'latest'] or
+ md5sum_old is None):
+
+ # Download the plugin file directly
+ r = self._download_plugin(plugin_url)
+
+ # Write downloaded plugin into file if checksums don't match
+ if md5sum_old is None:
+ # No previously installed plugin
+ if not self.module.check_mode:
+ self._write_file(plugin_file, r)
+
+ changed = True
+ else:
+ # Get data for the MD5
+ data = r.read()
+
+ # Make new checksum
+ md5sum_new = hashlib.md5(data).hexdigest()
+
+ # If the checksum is different from the currently installed
+ # plugin, store the new plugin
+ if md5sum_old != md5sum_new:
+ if not self.module.check_mode:
+ self._write_file(plugin_file, data)
+
+ changed = True
+ else:
+ # Check for update from the updates JSON file
+ plugin_data = self._download_updates()
+
+ try:
+ sha1_old = hashlib.sha1(open(plugin_file, 'rb').read())
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot calculate SHA1 of the old plugin.",
+ details=e.message)
+
+ sha1sum_old = base64.b64encode(sha1_old.digest())
+
+ # If the latest version changed, download it
+ if sha1sum_old != plugin_data['sha1']:
+ if not self.module.check_mode:
+ r = self._download_plugin(plugin_url)
+ self._write_file(plugin_file, r)
+
+ changed = True
+
+ # Change file attributes if needed
+ if os.path.isfile(plugin_file):
+ params = {
+ 'dest': plugin_file
+ }
+ params.update(self.params)
+ file_args = self.module.load_file_common_arguments(params)
+
+ if not self.module.check_mode:
+ # Not sure how to run this in the check mode
+ changed = self.module.set_fs_attributes_if_different(
+ file_args, changed)
+ else:
+ # See the comment above
+ changed = True
+
+ return changed
+
+ def _download_updates(self):
+ updates_filename = 'jenkins-plugin-cache.json'
+ updates_dir = os.path.expanduser('~/.ansible/tmp')
+ updates_file = "%s/%s" % (updates_dir, updates_filename)
+ download_updates = True
+
+ # Check if we need to download new updates file
+ if os.path.isfile(updates_file):
+ # Get timestamp when the file was changed last time
+ ts_file = os.stat(updates_file).st_mtime
+ ts_now = time.time()
+
+ if ts_now - ts_file < self.params['updates_expiration']:
+ download_updates = False
+
+ updates_file_orig = updates_file
+
+ # Download the updates file if needed
+ if download_updates:
+ url = "%s/update-center.json" % self.params['updates_url']
+
+ # Get the data
+ r = self._get_url_data(
+ url,
+ msg_status="Remote updates not found.",
+ msg_exception="Updates download failed.")
+
+ # Write the updates file
+ _, updates_file = tempfile.mkstemp()
+
+ try:
+ fd = open(updates_file, 'wb')
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot open the tmp updates file %s." % updates_file,
+ details=str(e))
+
+ fd.write(r.read())
+
+ try:
+ fd.close()
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot close the tmp updates file %s." % updates_file,
+ details=str(e))
+
+ # Open the updates file
+ try:
+ f = open(updates_file)
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot open temporal updates file.",
+ details=str(e))
+
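+ # Jenkins wraps update-center.json in a JSONP callback
+ # (updateCenter.post( ... );), so the JSON document itself is
+ # expected on the second line of the file.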
+ i = 0
+ for line in f:
+ # Read only the second line
+ if i == 1:
+ try:
+ data = json.loads(line)
+ except Exception:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot load JSON data from the tmp updates file.",
+ details=e.message)
+
+ break
+
+ i += 1
+
+ # Move the updates file to the right place if we could read it
+ if download_updates:
+ # Make sure the destination directory exists
+ if not os.path.isdir(updates_dir):
+ try:
+ os.makedirs(updates_dir, int('0700', 8))
+ except OSError:
+ e = get_exception()
+ self.module.fail_json(
+ msg="Cannot create temporal directory.",
+ details=e.message)
+
+ self.module.atomic_move(updates_file, updates_file_orig)
+
+ # Check if we have the plugin data available
+ if 'plugins' not in data or self.params['name'] not in data['plugins']:
+ self.module.fail_json(
+ msg="Cannot find plugin data in the updates file.")
+
+ return data['plugins'][self.params['name']]
+
+ def _download_plugin(self, plugin_url):
+ # Download the plugin
+ r = self._get_url_data(
+ plugin_url,
+ msg_status="Plugin not found.",
+ msg_exception="Plugin download failed.")
+
+ return r
+
+ def _write_file(self, f, data):
+ # Store the plugin into a temp file and then move it
+ _, tmp_f = tempfile.mkstemp()
+
+ try:
+ fd = open(tmp_f, 'wb')
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+ msg='Cannot open the temporary plugin file %s.' % tmp_f,
+ details=str(e))
+
+ if isinstance(data, str):
+ d = data
+ else:
+ d = data.read()
+
+ fd.write(d)
+
+ try:
+ fd.close()
+ except IOError:
+ e = get_exception()
+ self.module.fail_json(
+ msg='Cannot close the temporary plugin file %s.' % tmp_f,
+ details=str(e))
+
+ # Move the file onto the right place
+ self.module.atomic_move(tmp_f, f)
+
+ def uninstall(self):
+ changed = False
+
+ # Perform the action
+ if self.is_installed:
+ if not self.module.check_mode:
+ self._pm_query('doUninstall', 'Uninstallation')
+
+ changed = True
+
+ return changed
+
+ def pin(self):
+ return self._pinning('pin')
+
+ def unpin(self):
+ return self._pinning('unpin')
+
+ def _pinning(self, action):
+ changed = False
+
+ # Check if the plugin is pinned/unpinned
+ if (
+ action == 'pin' and not self.is_pinned or
+ action == 'unpin' and self.is_pinned):
+
+ # Perform the action
+ if not self.module.check_mode:
+ self._pm_query(action, "%sning" % action.capitalize())
+
+ changed = True
+
+ return changed
+
+ def enable(self):
+ return self._enabling('enable')
+
+ def disable(self):
+ return self._enabling('disable')
+
+ def _enabling(self, action):
+ changed = False
+
+ # Check if the plugin is enabled/disabled
+ if (
+ action == 'enable' and not self.is_enabled or
+ action == 'disable' and self.is_enabled):
+
+ # Perform the action
+ if not self.module.check_mode:
+ self._pm_query(
+ "make%sd" % action.capitalize(),
+ "%sing" % action[:-1].capitalize())
+
+ changed = True
+
+ return changed
+
+ def _pm_query(self, action, msg):
+ url = "%s/pluginManager/plugin/%s/%s" % (
+ self.params['url'], self.params['name'], action)
+ data = urllib.urlencode(self.crumb)
+
+ # Send the request
+ self._get_url_data(
+ url,
+ msg_status="Plugin not found. %s" % url,
+ msg_exception="%s has failed." % msg,
+ data=data)
+
+
+def main():
+ # Module arguments
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ group=dict(default='jenkins'),
+ jenkins_home=dict(default='/var/lib/jenkins'),
+ mode=dict(default='0644', type='raw'),
+ name=dict(required=True),
+ owner=dict(default='jenkins'),
+ params=dict(type='dict'),
+ state=dict(
+ choices=[
+ 'present',
+ 'absent',
+ 'pinned',
+ 'unpinned',
+ 'enabled',
+ 'disabled',
+ 'latest'],
+ default='present'),
+ timeout=dict(default=30, type="int"),
+ updates_expiration=dict(default=86400, type="int"),
+ updates_url=dict(default='https://updates.jenkins-ci.org'),
+ url=dict(default='http://localhost:8080'),
+ url_password=dict(no_log=True),
+ version=dict(),
+ with_dependencies=dict(default=True, type='bool'),
+ )
+ # Module settings
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # Update module parameters by user's parameters if defined
+ if 'params' in module.params and isinstance(module.params['params'], dict):
+ module.params.update(module.params['params'])
+ # Remove the params
+ module.params.pop('params', None)
+
+ # Force basic authentication
+ module.params['force_basic_auth'] = True
+
+ # Convert timeout to float
+ try:
+ module.params['timeout'] = float(module.params['timeout'])
+ except ValueError:
+ e = get_exception()
+ module.fail_json(
+ msg='Cannot convert %s to float.' % module.params['timeout'],
+ details=str(e))
+
+ # Set version to latest if state is latest
+ if module.params['state'] == 'latest':
+ module.params['state'] = 'present'
+ module.params['version'] = 'latest'
+
+ # Create some shortcuts
+ name = module.params['name']
+ state = module.params['state']
+
+ # Initial change state of the task
+ changed = False
+
+ # Instantiate the JenkinsPlugin object
+ jp = JenkinsPlugin(module)
+
+ # Perform action depending on the requested state
+ if state == 'present':
+ changed = jp.install()
+ elif state == 'absent':
+ changed = jp.uninstall()
+ elif state == 'pinned':
+ changed = jp.pin()
+ elif state == 'unpinned':
+ changed = jp.unpin()
+ elif state == 'enabled':
+ changed = jp.enable()
+ elif state == 'disabled':
+ changed = jp.disable()
+
+ # Print status of the change
+ module.exit_json(changed=changed, plugin=name, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/web_infrastructure/jira.py b/web_infrastructure/jira.py
old mode 100644
new mode 100755
index 79cfb72d4a7..aca751801c4
--- a/web_infrastructure/jira.py
+++ b/web_infrastructure/jira.py
@@ -20,6 +20,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
module: jira
version_added: "1.6"
@@ -91,6 +95,24 @@
description:
- Sets the assignee on create or transition operations. Note not all transitions will allow this.
+ linktype:
+ required: false
+ version_added: 2.3
+ description:
+ - The type of the link to create, used when the C(link) operation is selected.
+
+ inwardissue:
+ required: false
+ version_added: 2.3
+ description:
+ - The key of the issue from which the link will be created.
+
+ outwardissue:
+ required: false
+ version_added: 2.3
+ description:
+ - The key of the issue to which the link will be created.
+
fields:
required: false
description:
@@ -105,64 +127,110 @@
EXAMPLES = """
# Create a new issue and add a comment to it:
- name: Create an issue
- jira: uri={{server}} username={{user}} password={{pass}}
- project=ANS operation=create
- summary="Example Issue" description="Created using Ansible" issuetype=Task
+ jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: create
+ summary: Example Issue
+ description: Created using Ansible
+ issuetype: Task
register: issue
- name: Comment on issue
- jira: uri={{server}} username={{user}} password={{pass}}
- issue={{issue.meta.key}} operation=comment
- comment="A comment added by Ansible"
+ jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: comment
+ comment: A comment added by Ansible
# Assign an existing issue using edit
- name: Assign an issue using free-form fields
- jira: uri={{server}} username={{user}} password={{pass}}
- issue={{issue.meta.key}} operation=edit
- assignee=ssmith
+ jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: edit
+ assignee: ssmith
# Create an issue with an existing assignee
- name: Create an assigned issue
- jira: uri={{server}} username={{user}} password={{pass}}
- project=ANS operation=create
- summary="Assigned issue" description="Created and assigned using Ansible"
- issuetype=Task assignee=ssmith
-
-# Edit an issue using free-form fields
+ jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: create
+ summary: Assigned issue
+ description: Created and assigned using Ansible
+ issuetype: Task
+ assignee: ssmith
+
+# Edit an issue
- name: Set the labels on an issue using free-form fields
- jira: uri={{server}} username={{user}} password={{pass}}
- issue={{issue.meta.key}} operation=edit
- args: { fields: {labels: ["autocreated", "ansible"]}}
-
-- name: Set the labels on an issue, YAML version
- jira: uri={{server}} username={{user}} password={{pass}}
- issue={{issue.meta.key}} operation=edit
- args:
- fields:
- labels:
- - "autocreated"
- - "ansible"
- - "yaml"
+ jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: edit
+ args:
+ fields:
+ labels:
+ - autocreated
+ - ansible
# Retrieve metadata for an issue and use it to create an account
- name: Get an issue
- jira: uri={{server}} username={{user}} password={{pass}}
- project=ANS operation=fetch issue="ANS-63"
+ jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: fetch
+ issue: ANS-63
register: issue
- name: Create a unix account for the reporter
- sudo: true
- user: name="{{issue.meta.fields.creator.name}}" comment="{{issue.meta.fields.creator.displayName}}"
+ become: true
+ user:
+ name: '{{ issue.meta.fields.creator.name }}'
+ comment: '{{ issue.meta.fields.creator.displayName }}'
+
+- name: Create link from HSP-1 to MKY-1
+ jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ operation: link
+ linktype: Relate
+ inwardissue: HSP-1
+ outwardissue: MKY-1
# Transition an issue by target status
- name: Close the issue
- jira: uri={{server}} username={{user}} password={{pass}}
- issue={{issue.meta.key}} operation=transition status="Done"
+ jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: transition
+ status: Done
"""
-import json
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
import base64
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.pycompat24 import get_exception
+
def request(url, user, passwd, data=None, method=None):
if data:
data = json.dumps(data)
@@ -179,7 +247,7 @@ def request(url, user, passwd, data=None, method=None):
headers={'Content-Type':'application/json',
'Authorization':"Basic %s" % auth})
- if info['status'] not in (200, 204):
+ if info['status'] not in (200, 201, 204):
module.fail_json(msg=info['msg'])
body = response.read()
@@ -273,13 +341,26 @@ def transition(restbase, user, passwd, params):
return ret
+def link(restbase, user, passwd, params):
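+ # Create a link of the given type between two issues by POSTing
+ # to the issueLink REST resource.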
+ data = {
+ 'type': { 'name': params['linktype'] },
+ 'inwardIssue': { 'key': params['inwardissue'] },
+ 'outwardIssue': { 'key': params['outwardissue'] },
+ }
+
+ url = restbase + '/issueLink/'
+
+ ret = post(url, user, passwd, data)
+
+ return ret
# Some parameters are required depending on the operation:
OP_REQUIRED = dict(create=['project', 'issuetype', 'summary', 'description'],
comment=['issue', 'comment'],
edit=[],
fetch=['issue'],
- transition=['status'])
+ transition=['status'],
+ link=['linktype', 'inwardissue', 'outwardissue'])
def main():
@@ -287,7 +368,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
uri=dict(required=True),
- operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition'],
+ operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition', 'link'],
aliases=['command'], required=True),
username=dict(required=True),
password=dict(required=True),
@@ -299,7 +380,10 @@ def main():
comment=dict(),
status=dict(),
assignee=dict(),
- fields=dict(default={})
+ fields=dict(default={}, type='dict'),
+ linktype=dict(),
+ inwardissue=dict(),
+ outwardissue=dict(),
),
supports_check_mode=False
)
@@ -335,13 +419,13 @@ def main():
ret = method(restbase, user, passwd, module.params)
- except Exception, e:
+ except Exception:
+ e = get_exception()
return module.fail_json(msg=e.message)
module.exit_json(changed=True, meta=ret)
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/web_infrastructure/letsencrypt.py b/web_infrastructure/letsencrypt.py
new file mode 100644
index 00000000000..a8541a6d77a
--- /dev/null
+++ b/web_infrastructure/letsencrypt.py
@@ -0,0 +1,805 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016 Michael Gruener
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+import binascii
+import copy
+import locale
+import textwrap
+import datetime
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: letsencrypt
+author: "Michael Gruener (@mgruener)"
+version_added: "2.2"
+short_description: Create SSL certificates with Let's Encrypt
+description:
+ - "Create and renew SSL certificates with Let's Encrypt. Let’s Encrypt is a
+ free, automated, and open certificate authority (CA), run for the
+ public’s benefit. For details see U(https://letsencrypt.org). The current
+ implementation supports the http-01, tls-sni-02 and dns-01 challenges."
+ - "To use this module, it has to be executed at least twice. Either as two
+ different tasks in the same run or during multiple runs."
+ - "Between these two tasks you have to fulfill the required steps for the
+ chosen challenge by whatever means necessary. For http-01 that means
+ creating the necessary challenge file on the destination webserver. For
+ dns-01 the necessary DNS record has to be created. tls-sni-02 requires
+ you to create an SSL certificate with the appropriate subjectAlternativeNames.
+ It is I(not) the responsibility of this module to perform these steps."
+ - "For details on how to fulfill these challenges, you might have to read through
+ U(https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7)"
+ - "Although the defaults are choosen so that the module can be used with
+ the Let's Encrypt CA, the module can be used with any service using the ACME
+ protocol."
+requirements:
+ - "python >= 2.6"
+ - openssl
+options:
+ account_key:
+ description:
+ - "File containing the the Let's Encrypt account RSA key."
+ - "Can be created with C(openssl rsa ...)."
+ required: true
+ account_email:
+ description:
+ - "The email address associated with this account."
+ - "It will be used for certificate expiration warnings."
+ required: false
+ default: null
+ acme_directory:
+ description:
+ - "The ACME directory to use. This is the entry point URL to access
+ CA server API."
+ - "For safety reasons the default is set to the Let's Encrypt staging server.
+ This will create technically correct, but untrusted certificates."
+ required: false
+ default: https://acme-staging.api.letsencrypt.org/directory
+ agreement:
+ description:
+ - "URI to a terms of service document you agree to when using the
+ ACME service at C(acme_directory)."
+ required: false
+ default: 'https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf'
+ challenge:
+ description: The challenge to be performed.
+ required: false
+ choices: [ 'http-01', 'dns-01', 'tls-sni-02']
+ default: 'http-01'
+ csr:
+ description:
+ - "File containing the CSR for the new certificate."
+ - "Can be created with C(openssl csr ...)."
+ - "The CSR may contain multiple Subject Alternate Names, but each one
+ will lead to an individual challenge that must be fulfilled for the
+ CSR to be signed."
+ required: true
+ aliases: ['src']
+ data:
+ description:
+ - "The data to validate ongoing challenges."
+ - "The value that must be used here will be provided by a previous use
+ of this module."
+ required: false
+ default: null
+ dest:
+ description: The destination file for the certificate.
+ required: true
+ aliases: ['cert']
+ remaining_days:
+ description:
+ - "The number of days the certificate must have left being valid.
+ If C(remaining_days < cert_days), then it will be renewed.
+ If the certificate is not renewed, module return values will not
+ include C(challenge_data)."
+ required: false
+ default: 10
+'''
+
+EXAMPLES = '''
+- letsencrypt:
+ account_key: /etc/pki/cert/private/account.key
+ csr: /etc/pki/cert/csr/sample.com.csr
+ dest: /etc/httpd/ssl/sample.com.crt
+ register: sample_com_challenge
+
+# perform the necessary steps to fulfill the challenge
+# for example:
+#
+# - copy:
+# dest: /var/www/html/{{ sample_com_challenge['challenge_data']['sample.com']['http-01']['resource'] }}
+# content: "{{ sample_com_challenge['challenge_data']['sample.com']['http-01']['resource_value'] }}"
+# when: sample_com_challenge|changed
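+#
+# for the dns-01 challenge the matching TXT record could be created in
+# a similar way, e.g. with the route53 module (a sketch only; the zone
+# name is an assumption):
+#
+# - route53:
+# command: create
+# zone: sample.com
+# record: "{{ sample_com_challenge['challenge_data']['sample.com']['dns-01']['resource'] }}.sample.com"
+# type: TXT
+# ttl: 60
+# value: "\"{{ sample_com_challenge['challenge_data']['sample.com']['dns-01']['resource_value'] }}\""
+# when: sample_com_challenge|changed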
+
+- letsencrypt:
+ account_key: /etc/pki/cert/private/account.key
+ csr: /etc/pki/cert/csr/sample.com.csr
+ dest: /etc/httpd/ssl/sample.com.crt
+ data: "{{ sample_com_challenge }}"
+'''
+
+RETURN = '''
+cert_days:
+ description: the number of days the certificate remains valid.
+ returned: success
+challenge_data:
+ description: per domain / challenge type challenge data
+ returned: changed
+ type: dictionary
+ contains:
+ resource:
+ description: the challenge resource that must be created for validation
+ returned: changed
+ type: string
+ sample: .well-known/acme-challenge/evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA
+ resource_value:
+ description: the value the resource has to produce for the validation
+ returned: changed
+ type: string
+ sample: IlirfxKKXA...17Dt3juxGJ-PCt92wr-oA
+authorizations:
+ description: ACME authorization data.
+ returned: changed
+ type: list
+ contains:
+ authorization:
+ description: ACME authorization object. See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.1.2
+ returned: success
+ type: dict
+'''
+
+def nopad_b64(data):
+ return base64.urlsafe_b64encode(data).decode('utf8').replace("=", "")
+
+def simple_get(module,url):
+ resp, info = fetch_url(module, url, method='GET')
+
+ result = None
+ content = None
+ try:
+ content = resp.read()
+ except AttributeError:
+ if info['body']:
+ content = info['body']
+
+ if content:
+ if info['content-type'].startswith('application/json'):
+ try:
+ result = module.from_json(content.decode('utf8'))
+ except ValueError:
+ module.fail_json(msg="Failed to parse the ACME response: {0} {1}".format(url,content))
+ else:
+ result = content
+
+ if info['status'] >= 400:
+ module.fail_json(msg="ACME request failed: CODE: {0} RESULT:{1}".format(info['status'],result))
+ return result
+
+def get_cert_days(module,cert_file):
+ '''
+ Return the days the certificate in cert_file remains valid and -1
+ if the file was not found.
+ '''
+ if not os.path.exists(cert_file):
+ return -1
+
+ openssl_bin = module.get_bin_path('openssl', True)
+ openssl_cert_cmd = [openssl_bin, "x509", "-in", cert_file, "-noout", "-text"]
+ _, out, _ = module.run_command(openssl_cert_cmd,check_rc=True)
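+ # The text dump contains a validity line such as
+ # "Not After : Jun 5 12:00:00 2017 GMT", which is parsed below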
+ try:
+ not_after_str = re.search(r"\s+Not After\s*:\s+(.*)",out.decode('utf8')).group(1)
+ not_after = datetime.datetime.fromtimestamp(time.mktime(time.strptime(not_after_str,'%b %d %H:%M:%S %Y %Z')))
+ except AttributeError:
+ module.fail_json(msg="No 'Not after' date found in {0}".format(cert_file))
+ except ValueError:
+ module.fail_json(msg="Failed to parse 'Not after' date of {0}".format(cert_file))
+ now = datetime.datetime.utcnow()
+ return (not_after - now).days
+
+# function source: network/basics/uri.py
+def write_file(module, dest, content):
+ '''
+ Write content to destination file dest, only if the content
+ has changed.
+ '''
+ changed = False
+ # create a tempfile with some test content
+ _, tmpsrc = tempfile.mkstemp()
+ f = open(tmpsrc, 'wb')
+ try:
+ f.write(content)
+ except Exception as err:
+ os.remove(tmpsrc)
+ module.fail_json(msg="failed to create temporary content file: %s" % str(err))
+ f.close()
+ checksum_src = None
+ checksum_dest = None
+ # raise an error if there is no tmpsrc file
+ if not os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Source %s does not exist" % (tmpsrc))
+ if not os.access(tmpsrc, os.R_OK):
+ os.remove(tmpsrc)
+ module.fail_json( msg="Source %s not readable" % (tmpsrc))
+ checksum_src = module.sha1(tmpsrc)
+ # check if there is no dest file
+ if os.path.exists(dest):
+ # raise an error if copy has no permission on dest
+ if not os.access(dest, os.W_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s not writable" % (dest))
+ if not os.access(dest, os.R_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s not readable" % (dest))
+ checksum_dest = module.sha1(dest)
+ else:
+ if not os.access(os.path.dirname(dest), os.W_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination dir %s not writable" % (os.path.dirname(dest)))
+ if checksum_src != checksum_dest:
+ try:
+ shutil.copyfile(tmpsrc, dest)
+ changed = True
+ except Exception as err:
+ os.remove(tmpsrc)
+ module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err)))
+ os.remove(tmpsrc)
+ return changed
+
+class ACMEDirectory(object):
+ '''
+ The ACME server directory. Gives access to the available resources
+ and the Replay-Nonce for a given URI. This only works for
+ URIs that permit GET requests (so normally not the ones that
+ require authentication).
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.2
+ '''
+ def __init__(self, module):
+ self.module = module
+ self.directory_root = module.params['acme_directory']
+
+ self.directory = simple_get(self.module,self.directory_root)
+
+ def __getitem__(self, key): return self.directory[key]
+
+ def get_nonce(self,resource=None):
+ url = self.directory_root
+ if resource is not None:
+ url = resource
+ _, info = fetch_url(self.module, url, method='HEAD')
+ if info['status'] != 200:
+ self.module.fail_json(msg="Failed to get replay-nonce, got status {0}".format(info['status']))
+ return info['replay-nonce']
+
+class ACMEAccount(object):
+ '''
+ ACME account object. Handles the authorized communication with the
+ ACME server. Provides access to account-bound information like
+ the currently active authorizations and valid certificates
+ '''
+ def __init__(self,module):
+ self.module = module
+ self.agreement = module.params['agreement']
+ self.key = module.params['account_key']
+ self.email = module.params['account_email']
+ self.data = module.params['data']
+ self.directory = ACMEDirectory(module)
+ self.uri = None
+ self.changed = False
+
+ self._authz_list_uri = None
+ self._certs_list_uri = None
+
+ if not os.path.exists(self.key):
+ module.fail_json(msg="Account key %s not found" % (self.key))
+
+ self._openssl_bin = module.get_bin_path('openssl', True)
+
+ pub_hex, pub_exp = self._parse_account_key(self.key)
+ self.jws_header = {
+ "alg": "RS256",
+ "jwk": {
+ "e": nopad_b64(binascii.unhexlify(pub_exp.encode("utf-8"))),
+ "kty": "RSA",
+ "n": nopad_b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))),
+ },
+ }
+ self.init_account()
+
+ def get_keyauthorization(self,token):
+ '''
+ Returns the key authorization for the given token
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.1
+ '''
+ accountkey_json = json.dumps(self.jws_header['jwk'], sort_keys=True, separators=(',', ':'))
+ thumbprint = nopad_b64(hashlib.sha256(accountkey_json.encode('utf8')).digest())
+ return "{0}.{1}".format(token, thumbprint)
+
+ def _parse_account_key(self,key):
+ '''
+ Parses an RSA key file in PEM format and returns the modulus
+ and public exponent of the key
+ '''
+ openssl_keydump_cmd = [self._openssl_bin, "rsa", "-in", key, "-noout", "-text"]
+ _, out, _ = self.module.run_command(openssl_keydump_cmd,check_rc=True)
+
+ pub_hex, pub_exp = re.search(
+ r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
+ out.decode('utf8'), re.MULTILINE|re.DOTALL).groups()
+ pub_exp = "{0:x}".format(int(pub_exp))
+ if len(pub_exp) % 2:
+ pub_exp = "0{0}".format(pub_exp)
+
+ return pub_hex, pub_exp
+
+ def send_signed_request(self, url, payload):
+ '''
+ Sends a JWS signed HTTP POST request to the ACME server and returns
+ the response as dictionary
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.2
+ '''
+ protected = copy.deepcopy(self.jws_header)
+ protected["nonce"] = self.directory.get_nonce()
+
+ try:
+ payload64 = nopad_b64(self.module.jsonify(payload).encode('utf8'))
+ protected64 = nopad_b64(self.module.jsonify(protected).encode('utf8'))
+ except Exception as e:
+ self.module.fail_json(msg="Failed to encode payload / headers as JSON: {0}".format(e))
+
+ openssl_sign_cmd = [self._openssl_bin, "dgst", "-sha256", "-sign", self.key]
+ sign_payload = "{0}.{1}".format(protected64, payload64).encode('utf8')
+ _, out, _ = self.module.run_command(openssl_sign_cmd,data=sign_payload,check_rc=True, binary_data=True)
+
+ data = self.module.jsonify({
+ "header": self.jws_header,
+ "protected": protected64,
+ "payload": payload64,
+ "signature": nopad_b64(out),
+ })
+
+ resp, info = fetch_url(self.module, url, data=data, method='POST')
+ result = None
+ content = None
+ try:
+ content = resp.read()
+ except AttributeError:
+ if info['body']:
+ content = info['body']
+
+ if content:
+ if info['content-type'].startswith('application/json'):
+ try:
+ result = self.module.from_json(content.decode('utf8'))
+ except ValueError:
+ self.module.fail_json(msg="Failed to parse the ACME response: {0} {1}".format(url,content))
+ else:
+ result = content
+
+ return result,info
+
+ def _new_reg(self,contact=[]):
+ '''
+ Registers a new ACME account. Returns True if the account was
+ created and False if it already existed (i.e. it was not newly
+ created)
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.3
+ '''
+ if self.uri is not None:
+ return True
+
+ new_reg = {
+ 'resource': 'new-reg',
+ 'agreement': self.agreement,
+ 'contact': contact
+ }
+
+ result, info = self.send_signed_request(self.directory['new-reg'], new_reg)
+ if 'location' in info:
+ self.uri = info['location']
+
+ if info['status'] in [200,201]:
+ # Account did not exist
+ self.changed = True
+ return True
+ elif info['status'] == 409:
+ # Account did exist
+ return False
+ else:
+ self.module.fail_json(msg="Error registering: {0} {1}".format(info['status'], result))
+
+ def init_account(self):
+ '''
+ Create or update an account on the ACME server. As the only way
+ (without knowing an account URI) to test if an account exists
+ is to try and create one with the provided account key, this
+ method will always result in an account being present (except
+ on error situations). If the account already exists, it will
+ update the contact information.
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.3
+ '''
+
+ contact = []
+ if self.email:
+ contact.append('mailto:' + self.email)
+
+ # if this is not a new registration (i.e. an existing account)
+ if not self._new_reg(contact):
+ # pre-existing account, get account data...
+ result, _ = self.send_signed_request(self.uri, {'resource':'reg'})
+
+ # XXX: letsencrypt/boulder#1435
+ if 'authorizations' in result:
+ self._authz_list_uri = result['authorizations']
+ if 'certificates' in result:
+ self._certs_list_uri = result['certificates']
+
+ # ...and check if update is necessary
+ do_update = False
+ if 'contact' in result:
+ if cmp(contact,result['contact']) != 0:
+ do_update = True
+ elif len(contact) > 0:
+ do_update = True
+
+ if do_update:
+ upd_reg = result
+ upd_reg['contact'] = contact
+ result, _ = self.send_signed_request(self.uri, upd_reg)
+ self.changed = True
+
+ def get_authorizations(self):
+ '''
+ Return a list of currently active authorizations
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4
+ '''
+ authz_list = {'authorizations': []}
+ if self._authz_list_uri is None:
+ # XXX: letsencrypt/boulder#1435
+ # Workaround, retrieve the known authorization urls
+ # from the data attribute
+ # It is also a way to limit the queried authorizations, which
+ # might become relevant at some point
+ if (self.data is not None) and ('authorizations' in self.data):
+ for auth in self.data['authorizations']:
+ authz_list['authorizations'].append(auth['uri'])
+ else:
+ return []
+ else:
+ # TODO: need to handle pagination
+ authz_list = simple_get(self.module, self._authz_list_uri)
+
+ authz = []
+ for auth_uri in authz_list['authorizations']:
+ auth = simple_get(self.module,auth_uri)
+ auth['uri'] = auth_uri
+ authz.append(auth)
+
+ return authz
+
+class ACMEClient(object):
+ '''
+ ACME client class. Uses an ACME account object and a CSR to
+ start and validate ACME challenges and download the respective
+ certificates.
+ '''
+ def __init__(self,module):
+ self.module = module
+ self.challenge = module.params['challenge']
+ self.csr = module.params['csr']
+ self.dest = module.params['dest']
+ self.account = ACMEAccount(module)
+ self.directory = self.account.directory
+ self.authorizations = self.account.get_authorizations()
+ self.cert_days = -1
+ self.changed = self.account.changed
+
+ if not os.path.exists(self.csr):
+ module.fail_json(msg="CSR %s not found" % (self.csr))
+
+ self._openssl_bin = module.get_bin_path('openssl', True)
+ self.domains = self._get_csr_domains()
+
+ def _get_csr_domains(self):
+ '''
+ Parse the CSR and return the list of requested domains
+ '''
+ openssl_csr_cmd = [self._openssl_bin, "req", "-in", self.csr, "-noout", "-text"]
+ _, out, _ = self.module.run_command(openssl_csr_cmd,check_rc=True)
+
+ domains = set([])
+ common_name = re.search(r"Subject:.*? CN=([^\s,;/]+)", out.decode('utf8'))
+ if common_name is not None:
+ domains.add(common_name.group(1))
+ subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE|re.DOTALL)
+ if subject_alt_names is not None:
+ for san in subject_alt_names.group(1).split(", "):
+ if san.startswith("DNS:"):
+ domains.add(san[4:])
+ return domains
+
+
+ def _get_domain_auth(self,domain):
+ '''
+ Get the first authorization object for the given domain.
+ Return None if no active authorization for the given domain was found.
+ '''
+ if self.authorizations is None:
+ return None
+
+ for auth in self.authorizations:
+ if (auth['identifier']['type'] == 'dns') and (auth['identifier']['value'] == domain):
+ return auth
+ return None
+
+ def _add_or_update_auth(self,auth):
+ '''
+ Add or update the given authorization in the global authorizations list.
+ Return True if the auth was updated/added and False if no change was
+ necessary.
+ '''
+ for index,cur_auth in enumerate(self.authorizations):
+ if (cur_auth['uri'] == auth['uri']):
+ # does the auth parameter contain updated data?
+ if cmp(cur_auth,auth) != 0:
+ # yes, update our current authorization list
+ self.authorizations[index] = auth
+ return True
+ else:
+ return False
+ # this is a new authorization, add it to the list of current
+ # authorizations
+ self.authorizations.append(auth)
+ return True
+
+ def _new_authz(self,domain):
+ '''
+ Create a new authorization for the given domain.
+ Return the authorization object of the new authorization
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4
+ '''
+ if self.account.uri is None:
+ return
+
+ new_authz = {
+ "resource": "new-authz",
+ "identifier": {"type": "dns", "value": domain},
+ }
+
+ result, info = self.account.send_signed_request(self.directory['new-authz'], new_authz)
+ if info['status'] not in [200,201]:
+ self.module.fail_json(msg="Error requesting challenges: CODE: {0} RESULT: {1}".format(info['status'], result))
+ else:
+ result['uri'] = info['location']
+ return result
+
+ def _get_challenge_data(self,auth):
+ '''
+ Returns a dict with the data for all proposed (and supported) challenges
+ of the given authorization.
+ '''
+
+ data = {}
+ # no need to choose a specific challenge here as this module
+ # is not responsible for fulfilling the challenges. Calculate
+ # and return the required information for each challenge.
+ for challenge in auth['challenges']:
+ type = challenge['type']
+ token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
+ keyauthorization = self.account.get_keyauthorization(token)
+
+ # NOTE: tls-sni-01 is deliberately not supported: it is too
+ # complex to be useful and tls-sni-02 is an alternative
+ # as soon as it is implemented server side
+ if type == 'http-01':
+ # https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.2
+ resource = '.well-known/acme-challenge/' + token
+ value = keyauthorization
+ elif type == 'tls-sni-02':
+ # https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.3
+ token_digest = hashlib.sha256(token.encode('utf8')).hexdigest()
+ ka_digest = hashlib.sha256(keyauthorization.encode('utf8')).hexdigest()
+ len_token_digest = len(token_digest)
+ len_ka_digest = len(ka_digest)
+ resource = 'subjectAlternativeNames'
+ value = [
+ "{0}.{1}.token.acme.invalid".format(token_digest[:len_token_digest/2],token_digest[len_token_digest/2:]),
+ "{0}.{1}.ka.acme.invalid".format(ka_digest[:len_ka_digest/2],ka_digest[len_ka_digest/2:]),
+ ]
+ elif type == 'dns-01':
+ # https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.4
+ resource = '_acme-challenge'
+ value = nopad_b64(hashlib.sha256(keyauthorization).digest()).encode('utf8')
+ else:
+ continue
+
+ data[type] = { 'resource': resource, 'resource_value': value }
+ return data
+
+ def _validate_challenges(self,auth):
+ '''
+ Validate the authorization provided in the auth dict. Returns True
+ when the validation was successful and False when it was not.
+ '''
+ for challenge in auth['challenges']:
+ if self.challenge != challenge['type']:
+ continue
+
+ uri = challenge['uri']
+ token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
+ keyauthorization = self.account.get_keyauthorization(token)
+
+ challenge_response = {
+ "resource": "challenge",
+ "keyAuthorization": keyauthorization,
+ }
+ result, info = self.account.send_signed_request(uri, challenge_response)
+ if info['status'] not in [200,202]:
+ self.module.fail_json(msg="Error validating challenge: CODE: {0} RESULT: {1}".format(info['status'], result))
+
+ status = ''
+
+ while status not in ['valid','invalid','revoked']:
+ result = simple_get(self.module,auth['uri'])
+ result['uri'] = auth['uri']
+ if self._add_or_update_auth(result):
+ self.changed = True
+ # draft-ietf-acme-acme-02
+ # "status (required, string): ...
+ # If this field is missing, then the default value is "pending"."
+ if 'status' not in result:
+ status = 'pending'
+ else:
+ status = result['status']
+ time.sleep(2)
+
+ if status == 'invalid':
+ error_details = ''
+ # multiple challenges could have failed at this point, gather error
+ # details for all of them before failing
+ for challenge in result['challenges']:
+ if challenge['status'] == 'invalid':
+ error_details += ' CHALLENGE: {0}'.format(challenge['type'])
+ if 'error' in challenge:
+ error_details += ' DETAILS: {0};'.format(challenge['error']['detail'])
+ else:
+ error_details += ';'
+ self.module.fail_json(msg="Authorization for {0} returned invalid: {1}".format(result['identifier']['value'],error_details))
+
+ return status == 'valid'
+
+ def _new_cert(self):
+ '''
+ Create a new certificate based on the csr.
+ Return the certificate object as dict
+ https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.5
+ '''
+ openssl_csr_cmd = [self._openssl_bin, "req", "-in", self.csr, "-outform", "DER"]
+ _, out, _ = self.module.run_command(openssl_csr_cmd,check_rc=True)
+
+ new_cert = {
+ "resource": "new-cert",
+ "csr": nopad_b64(out),
+ }
+ result, info = self.account.send_signed_request(self.directory['new-cert'], new_cert)
+ if info['status'] not in [200,201]:
+ self.module.fail_json(msg="Error new cert: CODE: {0} RESULT: {1}".format(info['status'], result))
+ else:
+ return {'cert': result, 'uri': info['location']}
+
+ def _der_to_pem(self,der_cert):
+ '''
+ Convert the DER format certificate in der_cert to a PEM format
+ certificate and return it.
+ '''
+ return """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
+ "\n".join(textwrap.wrap(base64.b64encode(der_cert).decode('utf8'), 64)))
+
+ def do_challenges(self):
+ '''
+ Create new authorizations for all domains of the CSR and return
+ the challenge details for the chosen challenge type.
+ '''
+ data = {}
+ for domain in self.domains:
+ auth = self._get_domain_auth(domain)
+ if auth is None:
+ new_auth = self._new_authz(domain)
+ self._add_or_update_auth(new_auth)
+ data[domain] = self._get_challenge_data(new_auth)
+ self.changed = True
+ elif (auth['status'] == 'pending') or ('status' not in auth):
+ # draft-ietf-acme-acme-02
+ # "status (required, string): ...
+ # If this field is missing, then the default value is "pending"."
+ self._validate_challenges(auth)
+ # _validate_challenges updates the global authorizations list,
+ # so get the current version of the authorization we are working
+ # on to retrieve the challenge data
+ data[domain] = self._get_challenge_data(self._get_domain_auth(domain))
+
+ return data
+
+ def get_certificate(self):
+ '''
+ Request a new certificate and write it to the destination file.
+ Only do this if a destination file was provided and if all authorizations
+ for the domains of the csr are valid. No Return value.
+ '''
+ if self.dest is None:
+ return
+
+ for domain in self.domains:
+ auth = self._get_domain_auth(domain)
+ if auth is None or ('status' not in auth) or (auth['status'] != 'valid'):
+ return
+
+ cert = self._new_cert()
+ if cert['cert'] is not None:
+ pem_cert = self._der_to_pem(cert['cert'])
+ if write_file(self.module,self.dest,pem_cert):
+ self.cert_days = get_cert_days(self.module,self.dest)
+ self.changed = True
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ account_key = dict(required=True, type='path'),
+ account_email = dict(required=False, default=None, type='str'),
+ acme_directory = dict(required=False, default='https://acme-staging.api.letsencrypt.org/directory', type='str'),
+ agreement = dict(required=False, default='https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf', type='str'),
+ challenge = dict(required=False, default='http-01', choices=['http-01', 'dns-01', 'tls-sni-02'], type='str'),
+ csr = dict(required=True, aliases=['src'], type='path'),
+ data = dict(required=False, no_log=True, default=None, type='dict'),
+ dest = dict(required=True, aliases=['cert'], type='path'),
+ remaining_days = dict(required=False, default=10, type='int'),
+ ),
+ supports_check_mode = True,
+ )
+
+ # AnsibleModule() changes the locale, so change it back to C because we rely on time.strptime() when parsing certificate dates.
+ locale.setlocale(locale.LC_ALL, "C")
+
+ cert_days = get_cert_days(module,module.params['dest'])
+ if cert_days < module.params['remaining_days']:
+ # If checkmode is active, base the changed state solely on the status
+ # of the certificate file as all other actions (accessing an account, checking
+ # the authorization status...) would lead to potential changes of the current
+ # state
+ if module.check_mode:
+ module.exit_json(changed=True,authorizations={},
+ challenge_data={},cert_days=cert_days)
+ else:
+ client = ACMEClient(module)
+ client.cert_days = cert_days
+ data = client.do_challenges()
+ client.get_certificate()
+ module.exit_json(changed=client.changed,authorizations=client.authorizations,
+ challenge_data=data,cert_days=client.cert_days)
+ else:
+ module.exit_json(changed=False,cert_days=cert_days)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+if __name__ == '__main__':
+ main()
diff --git a/web_infrastructure/nginx_status_facts.py b/web_infrastructure/nginx_status_facts.py
new file mode 100644
index 00000000000..dd2fbd5ee17
--- /dev/null
+++ b/web_infrastructure/nginx_status_facts.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nginx_status_facts
+short_description: Retrieve nginx status facts.
+description:
+ - Gathers facts from nginx from a URL with C(stub_status) enabled.
+version_added: "2.3"
+author: "René Moser (@resmo)"
+options:
+ url:
+ description:
+ - URL of the nginx status.
+ required: true
+ timeout:
+ description:
+ - HTTP connection timeout in seconds.
+ required: false
+ default: 10
+
+notes:
+ - See http://nginx.org/en/docs/http/ngx_http_stub_status_module.html for more information.
+'''
+
+EXAMPLES = '''
+# Gather status facts from nginx on localhost
+- name: get current http stats
+ nginx_status_facts:
+ url: http://localhost/nginx_status
+
+# Gather status facts from nginx on localhost with a custom timeout of 20 seconds
+- name: get current http stats
+ nginx_status_facts:
+ url: http://localhost/nginx_status
+ timeout: 20
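+
+# Use the gathered facts in a subsequent task (illustrative; the module
+# exposes the values under the top-level nginx_status_facts fact)
+- name: print the number of active connections
+ debug:
+ msg: "{{ nginx_status_facts.active_connections }} active connections"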
+'''
+
+RETURN = '''
+---
+nginx_status_facts.active_connections:
+ description: Active connections.
+ returned: success
+ type: int
+ sample: 2340
+nginx_status_facts.accepts:
+ description: The total number of accepted client connections.
+ returned: success
+ type: int
+ sample: 81769947
+nginx_status_facts.handled:
+ description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached.
+ returned: success
+ type: int
+ sample: 81769947
+nginx_status_facts.requests:
+ description: The total number of client requests.
+ returned: success
+ type: int
+ sample: 144332345
+nginx_status_facts.reading:
+ description: The current number of connections where nginx is reading the request header.
+ returned: success
+ type: int
+ sample: 0
+nginx_status_facts.writing:
+ description: The current number of connections where nginx is writing the response back to the client.
+ returned: success
+ type: int
+ sample: 241
+nginx_status_facts.waiting:
+ description: The current number of idle client connections waiting for a request.
+ returned: success
+ type: int
+ sample: 2092
+nginx_status_facts.data:
+ description: HTTP response as is.
+ returned: success
+ type: string
+ sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+class NginxStatusFacts(object):
+
+ def __init__(self):
+ self.url = module.params.get('url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'nginx_status_facts': {
+ 'active_connections': None,
+ 'accepts': None,
+ 'handled': None,
+ 'requests': None,
+ 'reading': None,
+ 'writing': None,
+ 'waiting': None,
+ 'data': None,
+ }
+ }
+ (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout)
+ if not response:
+ module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout))
+
+ data = response.read()
+ if not data:
+ return result
+
+ result['nginx_status_facts']['data'] = data
+ match = re.match(r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \nReading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)', data, re.S)
+ if match:
+ result['nginx_status_facts']['active_connections'] = int(match.group(1))
+ result['nginx_status_facts']['accepts'] = int(match.group(2))
+ result['nginx_status_facts']['handled'] = int(match.group(3))
+ result['nginx_status_facts']['requests'] = int(match.group(4))
+ result['nginx_status_facts']['reading'] = int(match.group(5))
+ result['nginx_status_facts']['writing'] = int(match.group(6))
+ result['nginx_status_facts']['waiting'] = int(match.group(7))
+ return result
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(required=True),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ nginx_status_facts = NginxStatusFacts().run()
+ result = dict(changed=False, ansible_facts=nginx_status_facts)
+ module.exit_json(**result)
+
+if __name__ == '__main__':
+ main()
diff --git a/web_infrastructure/taiga_issue.py b/web_infrastructure/taiga_issue.py
new file mode 100644
index 00000000000..03be0952862
--- /dev/null
+++ b/web_infrastructure/taiga_issue.py
@@ -0,0 +1,317 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Alejandro Guirao
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: taiga_issue
+short_description: Creates/deletes an issue in a Taiga Project Management Platform
+description:
+ - Creates/deletes an issue in a Taiga Project Management Platform (U(https://taiga.io)).
+ - An issue is identified by the combination of project, issue subject and issue type.
+ - This module implements the creation or deletion of issues (not the update).
+version_added: "2.0"
+options:
+ taiga_host:
+ description:
+ - The hostname of the Taiga instance.
+ required: False
+ default: https://api.taiga.io
+ project:
+ description:
+ - Name of the project containing the issue. Must exist previously.
+ required: True
+ subject:
+ description:
+ - The issue subject.
+ required: True
+ issue_type:
+ description:
+ - The issue type. Must exist previously.
+ required: True
+ priority:
+ description:
+ - The issue priority. Must exist previously.
+ required: False
+ default: Normal
+ status:
+ description:
+ - The issue status. Must exist previously.
+ required: False
+ default: New
+ severity:
+ description:
+ - The issue severity. Must exist previously.
+ required: False
+ default: Normal
+ description:
+ description:
+ - The issue description.
+ required: False
+ default: ""
+ attachment:
+ description:
+ - Path to a file to be attached to the issue.
+ required: False
+ default: None
+ attachment_description:
+ description:
+ - A string describing the file to be attached to the issue.
+ required: False
+ default: ""
+ tags:
+ description:
+ - A list of tags to be assigned to the issue.
+ required: False
+ default: []
+ state:
+ description:
+ - Whether the issue should be present or not.
+ required: False
+ choices: ["present", "absent"]
+ default: present
+author: Alejandro Guirao (@lekum)
+requirements: [python-taiga]
+notes:
+- The authentication is achieved either by the environment variable TAIGA_TOKEN or by the pair of environment variables TAIGA_USERNAME and TAIGA_PASSWORD
+'''
+
+EXAMPLES = '''
+# Create an issue in my hosted Taiga environment and attach an error log
+- taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ priority: High
+ status: New
+ severity: Important
+ description: An error has been found. Please check the attached error log for details.
+ attachment: /path/to/error.log
+ attachment_description: Error log file
+ tags:
+ - Error
+ - Needs manual check
+ state: present
+
+# Delete the previously created issue
+- taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ state: absent
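+
+# Credentials are read from the environment (see notes). One way to
+# provide a token per task (a sketch; the variable holding the token is
+# an assumption):
+#
+# - taiga_issue:
+# project: myproject
+# subject: An error has been found
+# issue_type: Bug
+# environment:
+# TAIGA_TOKEN: "{{ taiga_token }}"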
+'''
+
+RETURN = '''# '''
+from os import getenv
+from os.path import isfile
+
+try:
+ from taiga import TaigaAPI
+ from taiga.exceptions import TaigaException
+ TAIGA_MODULE_IMPORTED=True
+except ImportError:
+ TAIGA_MODULE_IMPORTED=False
+
+def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority,
+ issue_status, issue_type, issue_severity, issue_description,
+ issue_attachment, issue_attachment_description,
+ issue_tags, state, check_mode=False):
+ """
+ Create or delete an issue depending on whether it already exists and on the desired state
+
+ The credentials should be passed via environment variables:
+ - TAIGA_TOKEN
+ - TAIGA_USERNAME and TAIGA_PASSWORD
+
+ Returns a tuple with these elements:
+ - A boolean representing the success of the operation
+ - A descriptive message
+ - A dict with the issue attributes, in case of issue creation, otherwise empty dict
+ """
+
+ changed = False
+
+ try:
+ token = getenv('TAIGA_TOKEN')
+ if token:
+ api = TaigaAPI(host=taiga_host, token=token)
+ else:
+ api = TaigaAPI(host=taiga_host)
+ username = getenv('TAIGA_USERNAME')
+ password = getenv('TAIGA_PASSWORD')
+ if not all([username, password]):
+ return (False, changed, "Missing credentials", {})
+ api.auth(username=username, password=password)
+
+ user_id = api.me().id
+ project_list = filter(lambda x: x.name == project_name, api.projects.list(member=user_id))
+ if len(project_list) != 1:
+ return (False, changed, "Unable to find project %s" % project_name, {})
+ project = project_list[0]
+ project_id = project.id
+
+ priority_list = filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id))
+ if len(priority_list) != 1:
+ return (False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {})
+ priority_id = priority_list[0].id
+
+ status_list = filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id))
+ if len(status_list) != 1:
+ return (False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {})
+ status_id = status_list[0].id
+
+ type_list = filter(lambda x: x.name == issue_type, project.list_issue_types())
+ if len(type_list) != 1:
+ return (False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {})
+ type_id = type_list[0].id
+
+ severity_list = filter(lambda x: x.name == issue_severity, project.list_severities())
+ if len(severity_list) != 1:
+ return (False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {})
+ severity_id = severity_list[0].id
+
+ issue = {
+ "project": project_name,
+ "subject": issue_subject,
+ "priority": issue_priority,
+ "status": issue_status,
+ "type": issue_type,
+ "severity": issue_severity,
+ "description": issue_description,
+ "tags": issue_tags,
+ }
+
+ # An issue is identified by the project_name, the issue_subject and the issue_type
+ matching_issue_list = filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues())
+ matching_issue_list_len = len(matching_issue_list)
+
+ if matching_issue_list_len == 0:
+ # The issue does not exist in the project
+ if state == "present":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Create the issue
+ new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, description=issue_description)
+ if issue_attachment:
+ new_issue.attach(issue_attachment, description=issue_attachment_description)
+ issue["attachment"] = issue_attachment
+ issue["attachment_description"] = issue_attachment_description
+ return (True, changed, "Issue created", issue)
+
+ else:
+ # If it does not exist, do nothing
+ return (True, changed, "Issue does not exist", {})
+
+ elif matching_issue_list_len == 1:
+ # The issue exists in the project
+ if state == "absent":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Delete the issue
+ matching_issue_list[0].delete()
+ return (True, changed, "Issue deleted", {})
+
+ else:
+ # Do nothing
+ return (True, changed, "Issue already exists", {})
+
+ else:
+ # More than 1 matching issue
+ return (False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {})
+
+ except TaigaException:
+ msg = "An exception happened: %s" % sys.exc_info()[1]
+ return (False, changed, msg, {})
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ taiga_host=dict(required=False, default="https://api.taiga.io"),
+ project=dict(required=True),
+ subject=dict(required=True),
+ issue_type=dict(required=True),
+ priority=dict(required=False, default="Normal"),
+ status=dict(required=False, default="New"),
+ severity=dict(required=False, default="Normal"),
+ description=dict(required=False, default=""),
+ attachment=dict(required=False, default=None),
+ attachment_description=dict(required=False, default=""),
+ tags=dict(required=False, default=[], type='list'),
+ state=dict(required=False, choices=['present','absent'], default='present'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not TAIGA_MODULE_IMPORTED:
+ msg = "This module needs python-taiga module"
+ module.fail_json(msg=msg)
+
+ taiga_host = module.params['taiga_host']
+ project_name = module.params['project']
+ issue_subject = module.params['subject']
+ issue_priority = module.params['priority']
+ issue_status = module.params['status']
+ issue_type = module.params['issue_type']
+ issue_severity = module.params['severity']
+ issue_description = module.params['description']
+ issue_attachment = module.params['attachment']
+ issue_attachment_description = module.params['attachment_description']
+ if issue_attachment:
+ if not isfile(issue_attachment):
+ msg = "%s is not a file" % issue_attachment
+ module.fail_json(msg=msg)
+ issue_tags = module.params['tags']
+ state = module.params['state']
+
+ return_status, changed, msg, issue_attr_dict = manage_issue(
+ module,
+ taiga_host,
+ project_name,
+ issue_subject,
+ issue_priority,
+ issue_status,
+ issue_type,
+ issue_severity,
+ issue_description,
+ issue_attachment,
+ issue_attachment_description,
+ issue_tags,
+ state,
+ check_mode=module.check_mode
+ )
+ if return_status:
+ if len(issue_attr_dict) > 0:
+ module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ module.fail_json(msg=msg)
+
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/windows/win_acl.ps1 b/windows/win_acl.ps1
new file mode 100644
index 00000000000..068130a203f
--- /dev/null
+++ b/windows/win_acl.ps1
@@ -0,0 +1,206 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Phil Schwartz
+# Copyright 2015, Trond Hindenes
+# Copyright 2015, Hans-Joachim Kliemeck
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+# win_acl module (File/Resources Permission Additions/Removal)
+
+
+#Functions
+Function UserSearch
+{
+ Param ([string]$accountName)
+ #Check if there's a realm specified
+
+ $searchDomain = $false
+ $searchDomainUPN = $false
+ $SearchAppPools = $false
+ if ($accountName.Split("\").count -gt 1)
+ {
+ if ($accountName.Split("\")[0] -eq $env:COMPUTERNAME)
+ {
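+ # account is already qualified with the local computer name; nothing to strip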
+
+ }
+ elseif ($accountName.Split("\")[0] -eq "IIS APPPOOL")
+ {
+ $SearchAppPools = $true
+ $accountName = $accountName.split("\")[1]
+ }
+ else
+ {
+ $searchDomain = $true
+ $accountName = $accountName.split("\")[1]
+ }
+ }
+ Elseif ($accountName.contains("@"))
+ {
+ $searchDomain = $true
+ $searchDomainUPN = $true
+ }
+ Else
+ {
+ #Default to local user account
+ $accountName = $env:COMPUTERNAME + "\" + $accountName
+ }
+
+ if (($searchDomain -eq $false) -and ($SearchAppPools -eq $false))
+ {
+ # do not use Win32_UserAccount, because e.g. SYSTEM (BUILTIN\SYSTEM or COMPUTERNAME\SYSTEM) will not be listed; note that Win32_Account also returns groups
+ $localaccount = get-wmiobject -class "Win32_Account" -namespace "root\CIMV2" -filter "(LocalAccount = True)" | where {$_.Caption -eq $accountName}
+ if ($localaccount)
+ {
+ return $localaccount.SID
+ }
+ }
+ Elseif ($SearchAppPools -eq $true)
+ {
+ Import-Module WebAdministration
+ $testiispath = Test-path "IIS:"
+ if ($testiispath -eq $false)
+ {
+ return $null
+ }
+ else
+ {
+ $apppoolobj = Get-ItemProperty IIS:\AppPools\$accountName
+ return $apppoolobj.applicationPoolSid
+ }
+ }
+ Else
+ {
+ #Search by samaccountname
+ $Searcher = [adsisearcher]""
+
+ If ($searchDomainUPN -eq $false) {
+ $Searcher.Filter = "sAMAccountName=$($accountName)"
+ }
+ Else {
+ $Searcher.Filter = "userPrincipalName=$($accountName)"
+ }
+
+ $result = $Searcher.FindOne()
+ if ($result)
+ {
+ $user = $result.GetDirectoryEntry()
+
+ # get binary SID from AD account
+ $binarySID = $user.ObjectSid.Value
+
+ # convert to string SID
+ return (New-Object System.Security.Principal.SecurityIdentifier($binarySID,0)).Value
+ }
+ }
+}
+
+$params = Parse-Args $args;
+
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+$path = Get-Attr $params "path" -failifempty $true
+$user = Get-Attr $params "user" -failifempty $true
+$rights = Get-Attr $params "rights" -failifempty $true
+
+$type = Get-Attr $params "type" -failifempty $true -validateSet "allow","deny" -resultobj $result
+$state = Get-Attr $params "state" "present" -validateSet "present","absent" -resultobj $result
+
+$inherit = Get-Attr $params "inherit" ""
+$propagation = Get-Attr $params "propagation" "None" -validateSet "None","NoPropagateInherit","InheritOnly" -resultobj $result
+
+If (-Not (Test-Path -Path $path)) {
+ Fail-Json $result "$path file or directory does not exist on the host"
+}
+
+# Test that the user/group is resolvable on the local machine
+$sid = UserSearch -AccountName ($user)
+if (!$sid)
+{
+ Fail-Json $result "$user is not a valid user or group on the host machine or domain"
+}
+
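+# inheritance flags apply only to directories: files get "None", directories default to "ContainerInherit, ObjectInherit" unless specified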
+If (Test-Path -Path $path -PathType Leaf) {
+ $inherit = "None"
+}
+ElseIf ($inherit -eq "") {
+ $inherit = "ContainerInherit, ObjectInherit"
+}
+
+Try {
+ $colRights = [System.Security.AccessControl.FileSystemRights]$rights
+ $InheritanceFlag = [System.Security.AccessControl.InheritanceFlags]$inherit
+ $PropagationFlag = [System.Security.AccessControl.PropagationFlags]$propagation
+
+ If ($type -eq "allow") {
+ $objType =[System.Security.AccessControl.AccessControlType]::Allow
+ }
+ Else {
+ $objType =[System.Security.AccessControl.AccessControlType]::Deny
+ }
+
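+ # build the ACE from the resolved SID, the requested rights, and the inheritance/propagation flags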
+ $objUser = New-Object System.Security.Principal.SecurityIdentifier($sid)
+ $objACE = New-Object System.Security.AccessControl.FileSystemAccessRule ($objUser, $colRights, $InheritanceFlag, $PropagationFlag, $objType)
+ $objACL = Get-ACL $path
+
+ # Check if the ACE already exists in the object's ACL list
+ $match = $false
+ ForEach($rule in $objACL.Access){
+ $ruleIdentity = $rule.IdentityReference.Translate([System.Security.Principal.SecurityIdentifier])
+ If (($rule.FileSystemRights -eq $objACE.FileSystemRights) -And ($rule.AccessControlType -eq $objACE.AccessControlType) -And ($ruleIdentity -eq $objACE.IdentityReference) -And ($rule.IsInherited -eq $objACE.IsInherited) -And ($rule.InheritanceFlags -eq $objACE.InheritanceFlags) -And ($rule.PropagationFlags -eq $objACE.PropagationFlags)) {
+ $match = $true
+ Break
+ }
+ }
+
+ If ($state -eq "present" -And $match -eq $false) {
+ Try {
+ $objACL.AddAccessRule($objACE)
+ Set-ACL $path $objACL
+ Set-Attr $result "changed" $true;
+ }
+ Catch {
+ Fail-Json $result "an exception occured when adding the specified rule"
+ }
+ }
+ ElseIf ($state -eq "absent" -And $match -eq $true) {
+ Try {
+ $objACL.RemoveAccessRule($objACE)
+ Set-ACL $path $objACL
+ Set-Attr $result "changed" $true;
+ }
+ Catch {
+ Fail-Json $result "an exception occured when removing the specified rule"
+ }
+ }
+ Else {
+ # A rule was attempting to be added but already exists
+ If ($match -eq $true) {
+ Exit-Json $result "the specified rule already exists"
+ }
+ # A rule didn't exist that was trying to be removed
+ Else {
+ Exit-Json $result "the specified rule does not exist"
+ }
+ }
+}
+Catch {
+ Fail-Json $result "an error occured when attempting to $state $rights permission(s) on $path for $user"
+}
+
+Exit-Json $result
diff --git a/windows/win_acl.py b/windows/win_acl.py
new file mode 100644
index 00000000000..4e6e9cb7ad6
--- /dev/null
+++ b/windows/win_acl.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2015, Phil Schwartz
+# Copyright 2015, Trond Hindenes
+# Copyright 2015, Hans-Joachim Kliemeck
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: win_acl
+version_added: "2.0"
+short_description: Set file/directory permissions for a system user or group.
+description:
+ - Add or remove rights/permissions for a given user or group for the specified src file or folder.
+ - If adding ACLs for AppPool identities (available since 2.3), the Windows Feature "Web-Scripting-Tools" must be enabled
+options:
+ path:
+ description:
+ - File or Directory
+ required: yes
+ user:
+ description:
+ - User or group to be granted or denied the specified rights on the src file/folder
+ required: yes
+ default: none
+ state:
+ description:
+ - Specify whether to add C(present) or remove C(absent) the specified access rule
+ required: no
+ choices:
+ - present
+ - absent
+ default: present
+ type:
+ description:
+ - Specify whether to allow or deny the rights specified
+ required: yes
+ choices:
+ - allow
+ - deny
+ default: none
+ rights:
+ description:
+ - The rights/permissions that are to be allowed/denied for the specified user or group for the given src file or directory. Can be entered as a comma separated list (Ex. "Modify, Delete, ExecuteFile"). For more information on the choices see MSDN FileSystemRights Enumeration.
+ required: yes
+ choices:
+ - AppendData
+ - ChangePermissions
+ - Delete
+ - DeleteSubdirectoriesAndFiles
+ - ExecuteFile
+ - FullControl
+ - ListDirectory
+ - Modify
+ - Read
+ - ReadAndExecute
+ - ReadAttributes
+ - ReadData
+ - ReadExtendedAttributes
+ - ReadPermissions
+ - Synchronize
+ - TakeOwnership
+ - Traverse
+ - Write
+ - WriteAttributes
+ - WriteData
+ - WriteExtendedAttributes
+ default: none
+ inherit:
+ description:
+ - Inherit flags on the ACL rules. Can be specified as a comma separated list (Ex. "ContainerInherit, ObjectInherit"). For more information on the choices see MSDN InheritanceFlags Enumeration.
+ required: no
+ choices:
+ - ContainerInherit
+ - ObjectInherit
+ - None
+ default: For Leaf File, None; For Directory, ContainerInherit, ObjectInherit;
+ propagation:
+ description:
+ - Propagation flag on the ACL rules. For more information on the choices see MSDN PropagationFlags Enumeration.
+ required: no
+ choices:
+ - None
+ - NoPropagateInherit
+ - InheritOnly
+ default: "None"
+author: Phil Schwartz (@schwartzmx), Trond Hindenes (@trondhindenes), Hans-Joachim Kliemeck (@h0nIg)
+'''
+
+EXAMPLES = '''
+# Restrict write,execute access to User Fed-Phil
+$ ansible -i hosts -m win_acl -a "user=Fed-Phil path=C:\Important\Executable.exe type=deny rights='ExecuteFile,Write'" all
+
+# Playbook example
+# Add access rule to allow IIS_IUSRS FullControl to MySite
+---
+- name: Add IIS_IUSRS allow rights
+ win_acl:
+ path: 'C:\inetpub\wwwroot\MySite'
+ user: 'IIS_IUSRS'
+ rights: 'FullControl'
+ type: 'allow'
+ state: 'present'
+ inherit: 'ContainerInherit, ObjectInherit'
+ propagation: 'None'
+
+# Remove previously added rule for IIS_IUSRS
+- name: Remove FullControl AccessRule for IIS_IUSRS
+ win_acl:
+ path: 'C:\inetpub\wwwroot\MySite'
+ user: 'IIS_IUSRS'
+ rights: 'FullControl'
+ type: 'allow'
+ state: 'absent'
+ inherit: 'ContainerInherit, ObjectInherit'
+ propagation: 'None'
+
+# Deny Intern
+- name: Deny Intern
+ win_acl:
+ path: 'C:\Administrator\Documents'
+ user: 'Intern'
+ rights: 'Read,Write,Modify,FullControl,Delete'
+ type: 'deny'
+ state: 'present'
+'''
diff --git a/windows/win_acl_inheritance.ps1 b/windows/win_acl_inheritance.ps1
new file mode 100644
index 00000000000..1933a3a5dd4
--- /dev/null
+++ b/windows/win_acl_inheritance.ps1
@@ -0,0 +1,86 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Hans-Joachim Kliemeck
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+
+$params = Parse-Args $args;
+
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+$path = Get-Attr $params "path" -failifempty $true
+$state = Get-Attr $params "state" "absent" -validateSet "present","absent" -resultobj $result
+$reorganize = Get-Attr $params "reorganize" "no" -validateSet "no","yes" -resultobj $result
+$reorganize = $reorganize | ConvertTo-Bool
+
+If (-Not (Test-Path -Path $path)) {
+ Fail-Json $result "$path file or directory does not exist on the host"
+}
+
+Try {
+ $objACL = Get-ACL $path
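+ # AreAccessRulesProtected is $true when inheritance is disabled, hence the negation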
+ $inheritanceEnabled = !$objACL.AreAccessRulesProtected
+
+ If (($state -eq "present") -And !$inheritanceEnabled) {
+ # second parameter is ignored if first=$False
+ $objACL.SetAccessRuleProtection($False, $False)
+
+ If ($reorganize) {
+ # this won't work without an intermediate save; the ACL state would otherwise remain unchanged
+ Set-ACL $path $objACL
+ $objACL = Get-ACL $path
+
+ # drop explicit ACEs that duplicate ACEs now inherited from the parent
+ ForEach($inheritedRule in $objACL.Access) {
+ If (!$inheritedRule.IsInherited) {
+ Continue
+ }
+
+ ForEach($explicitRule in $objACL.Access) {
+ If ($explicitRule.IsInherited) {
+ Continue
+ }
+
+ If (($inheritedRule.FileSystemRights -eq $explicitRule.FileSystemRights) -And ($inheritedRule.AccessControlType -eq $explicitRule.AccessControlType) -And ($inheritedRule.IdentityReference -eq $explicitRule.IdentityReference) -And ($inheritedRule.InheritanceFlags -eq $explicitRule.InheritanceFlags) -And ($inheritedRule.PropagationFlags -eq $explicitRule.PropagationFlags)) {
+ $objACL.RemoveAccessRule($explicitRule)
+ }
+ }
+ }
+ }
+
+ Set-ACL $path $objACL
+ Set-Attr $result "changed" $true;
+ }
+ Elseif (($state -eq "absent") -And $inheritanceEnabled) {
+ If ($reorganize) {
+ $objACL.SetAccessRuleProtection($True, $True)
+ } Else {
+ $objACL.SetAccessRuleProtection($True, $False)
+ }
+
+ Set-ACL $path $objACL
+ Set-Attr $result "changed" $true;
+ }
+}
+Catch {
+ Fail-Json $result "an error occured when attempting to disable inheritance"
+}
+
+Exit-Json $result
diff --git a/windows/win_acl_inheritance.py b/windows/win_acl_inheritance.py
new file mode 100644
index 00000000000..549ce629335
--- /dev/null
+++ b/windows/win_acl_inheritance.py
@@ -0,0 +1,83 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2015, Hans-Joachim Kliemeck
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: win_acl_inheritance
+version_added: "2.1"
+short_description: Change ACL inheritance
+description:
+ - Change ACL (Access Control List) inheritance and optionally copy inherited ACEs (Access Control Entries) to dedicated ACEs or vice versa.
+options:
+ path:
+ description:
+ - Path to be used for changing inheritance
+ required: true
+ state:
+ description:
+ - Specify whether to enable I(present) or disable I(absent) ACL inheritance
+ required: false
+ choices:
+ - present
+ - absent
+ default: absent
+ reorganize:
+ description:
+ - For C(state) = I(absent), indicates if the inherited ACEs should be copied from the parent directory. This is necessary (in combination with removal) to keep the ACL simple, instead of using multiple deny ACEs.
+ - For C(state) = I(present), indicates if explicit ACEs that duplicate inherited ones should be removed, compared against the parent directory. This removes complexity from the ACL structure.
+ required: false
+ choices:
+ - no
+ - yes
+ default: no
+author: Hans-Joachim Kliemeck (@h0nIg)
+'''
+
+EXAMPLES = '''
+# Playbook example
+---
+- name: Disable inherited ACE's
+ win_acl_inheritance:
+ path: 'C:\\apache\\'
+ state: absent
+
+- name: Disable and copy inherited ACE's
+ win_acl_inheritance:
+ path: 'C:\\apache\\'
+ state: absent
+ reorganize: yes
+
+- name: Enable and remove dedicated ACE's
+ win_acl_inheritance:
+ path: 'C:\\apache\\'
+ state: present
+ reorganize: yes
+'''
+
+RETURN = '''
+
+'''
\ No newline at end of file
diff --git a/windows/win_chocolatey.ps1 b/windows/win_chocolatey.ps1
index ce006fff152..3bb6a1f0dc0 100644
--- a/windows/win_chocolatey.ps1
+++ b/windows/win_chocolatey.ps1
@@ -34,6 +34,17 @@ if ($source) {$source = $source.Tolower()}
$showlog = Get-Attr -obj $params -name showlog -default "false" | ConvertTo-Bool
$state = Get-Attr -obj $params -name state -default "present"
+
+$installargs = Get-Attr -obj $params -name install_args -default $null
+$packageparams = Get-Attr -obj $params -name params -default $null
+$allowemptychecksums = Get-Attr -obj $params -name allow_empty_checksums -default "false" | ConvertTo-Bool
+$ignorechecksums = Get-Attr -obj $params -name ignore_checksums -default "false" | ConvertTo-Bool
+$ignoredependencies = Get-Attr -obj $params -name ignore_dependencies -default "false" | ConvertTo-Bool
+
+# as of chocolatey 0.9.10, nonzero success exit codes can be returned
+# see https://github.com/chocolatey/choco/issues/512#issuecomment-214284461
+$successexitcodes = (0,1605,1614,1641,3010)
+
if ("present","absent" -notcontains $state)
{
Fail-Json $result "state is $state; must be present or absent"
@@ -50,7 +61,12 @@ Function Chocolatey-Install-Upgrade
if ($ChocoAlreadyInstalled -eq $null)
{
#We need to install chocolatey
- iex ((new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1"))
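+ # run the bootstrap script in a child powershell process so a failure can be detected via $LASTEXITCODE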
+ $install_output = (new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1") | powershell -
+ if ($LASTEXITCODE -ne 0)
+ {
+ Set-Attr $result "choco_bootstrap_output" $install_output
+ Fail-Json $result "Chocolatey bootstrap installation failed."
+ }
$result.changed = $true
$script:executable = "C:\ProgramData\chocolatey\bin\choco.exe"
}
@@ -58,7 +74,7 @@ Function Chocolatey-Install-Upgrade
{
$script:executable = "choco.exe"
- if ((choco --version) -lt '0.9.9')
+ if ([Version](choco --version) -lt [Version]'0.9.9')
{
Choco-Upgrade chocolatey
}
@@ -86,7 +102,7 @@ Function Choco-IsInstalled
Throw "Error checking installation status for $package"
}
- If ("$results" -match " $package .* (\d+) packages installed.")
+ If ("$results" -match "$package .* (\d+) packages installed.")
{
return $matches[1] -gt 0
}
@@ -106,7 +122,17 @@ Function Choco-Upgrade
[Parameter(Mandatory=$false, Position=3)]
[string]$source,
[Parameter(Mandatory=$false, Position=4)]
- [bool]$force
+ [bool]$force,
+ [Parameter(Mandatory=$false, Position=5)]
+ [string]$installargs,
+ [Parameter(Mandatory=$false, Position=6)]
+ [string]$packageparams,
+ [Parameter(Mandatory=$false, Position=7)]
+ [bool]$allowemptychecksums,
+ [Parameter(Mandatory=$false, Position=8)]
+ [bool]$ignorechecksums,
+ [Parameter(Mandatory=$false, Position=9)]
+ [bool]$ignoredependencies
)
if (-not (Choco-IsInstalled $package))
@@ -131,9 +157,34 @@ Function Choco-Upgrade
$cmd += " -force"
}
+ if ($installargs)
+ {
+ $cmd += " -installargs '$installargs'"
+ }
+
+ if ($packageparams)
+ {
+ $cmd += " -params '$packageparams'"
+ }
+
+ if ($allowemptychecksums)
+ {
+ $cmd += " --allow-empty-checksums"
+ }
+
+ if ($ignorechecksums)
+ {
+ $cmd += " --ignore-checksums"
+ }
+
+ if ($ignoredependencies)
+ {
+ $cmd += " -ignoredependencies"
+ }
+
$results = invoke-expression $cmd
- if ($LastExitCode -ne 0)
+ if ($LastExitCode -notin $successexitcodes)
{
Set-Attr $result "choco_error_cmd" $cmd
Set-Attr $result "choco_error_log" "$results"
@@ -163,17 +214,35 @@ Function Choco-Install
[Parameter(Mandatory=$false, Position=4)]
[bool]$force,
[Parameter(Mandatory=$false, Position=5)]
- [bool]$upgrade
+ [bool]$upgrade,
+ [Parameter(Mandatory=$false, Position=6)]
+ [string]$installargs,
+ [Parameter(Mandatory=$false, Position=7)]
+ [string]$packageparams,
+ [Parameter(Mandatory=$false, Position=8)]
+ [bool]$allowemptychecksums,
+ [Parameter(Mandatory=$false, Position=9)]
+ [bool]$ignorechecksums,
+ [Parameter(Mandatory=$false, Position=10)]
+ [bool]$ignoredependencies
)
if (Choco-IsInstalled $package)
{
if ($upgrade)
{
- Choco-Upgrade -package $package -version $version -source $source -force $force
+ Choco-Upgrade -package $package -version $version -source $source -force $force `
+ -installargs $installargs -packageparams $packageparams `
+ -allowemptychecksums $allowemptychecksums -ignorechecksums $ignorechecksums `
+ -ignoredependencies $ignoredependencies
+
+ return
}
- return
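+ # already installed: return unless -force was given, in which case fall through and reinstall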
+ if (-not $force)
+ {
+ return
+ }
}
$cmd = "$executable install -dv -y $package"
@@ -193,9 +262,34 @@ Function Choco-Install
$cmd += " -force"
}
+ if ($installargs)
+ {
+ $cmd += " -installargs '$installargs'"
+ }
+
+ if ($packageparams)
+ {
+ $cmd += " -params '$packageparams'"
+ }
+
+ if ($allowemptychecksums)
+ {
+ $cmd += " --allow-empty-checksums"
+ }
+
+ if ($ignorechecksums)
+ {
+ $cmd += " --ignore-checksums"
+ }
+
+ if ($ignoredependencies)
+ {
+ $cmd += " -ignoredependencies"
+ }
+
$results = invoke-expression $cmd
- if ($LastExitCode -ne 0)
+ if ($LastExitCode -notin $successexitcodes)
{
Set-Attr $result "choco_error_cmd" $cmd
Set-Attr $result "choco_error_log" "$results"
@@ -235,9 +329,14 @@ Function Choco-Uninstall
$cmd += " -force"
}
+ if ($packageparams)
+ {
+ $cmd += " -params '$packageparams'"
+ }
+
$results = invoke-expression $cmd
- if ($LastExitCode -ne 0)
+ if ($LastExitCode -notin $successexitcodes)
{
Set-Attr $result "choco_error_cmd" $cmd
Set-Attr $result "choco_error_log" "$results"
@@ -253,7 +352,9 @@ Try
if ($state -eq "present")
{
Choco-Install -package $package -version $version -source $source `
- -force $force -upgrade $upgrade
+ -force $force -upgrade $upgrade -installargs $installargs `
+ -packageparams $packageparams -allowemptychecksums $allowemptychecksums `
+ -ignorechecksums $ignorechecksums -ignoredependencies $ignoredependencies
}
else
{
@@ -267,3 +368,4 @@ Catch
Fail-Json $result $_.Exception.Message
}
+
diff --git a/windows/win_chocolatey.py b/windows/win_chocolatey.py
index 7f399dbd22f..89e6d73af0e 100644
--- a/windows/win_chocolatey.py
+++ b/windows/win_chocolatey.py
@@ -21,6 +21,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_chocolatey
@@ -33,48 +37,59 @@
description:
- Name of the package to be installed
required: true
- default: null
- aliases: []
state:
description:
- State of the package on the system
- required: false
choices:
- present
- absent
default: present
- aliases: []
force:
description:
- Forces install of the package (even if it already exists). Using Force will cause ansible to always report that a change was made
- required: false
choices:
- yes
- no
default: no
- aliases: []
upgrade:
description:
- If the package is already installed, try to upgrade it to the latest version or to the specified version
- required: false
choices:
- yes
- no
default: no
- aliases: []
version:
description:
- Specific version of the package to be installed
- Ignored when state == 'absent'
- required: false
- default: null
- aliases: []
source:
description:
- Specify source rather than using default chocolatey repository
+ install_args:
+ description:
+ - Arguments to pass to the native installer
+ version_added: '2.1'
+ params:
+ description:
+ - Parameters to pass to the package
+ version_added: '2.1'
+ allow_empty_checksums:
+ description:
+ - Allow empty checksums to be used
 required: false
- default: null
- aliases: []
+ default: false
+ version_added: '2.2'
+ ignore_checksums:
+ description:
+ - Ignore checksums
+ required: false
+ default: false
+ version_added: '2.2'
+ ignore_dependencies:
+ description:
+ - Ignore dependencies, only install/upgrade the package itself
+ default: false
+ version_added: '2.1'
author: "Trond Hindenes (@trondhindenes), Peter Mounce (@petemounce), Pepe Barbe (@elventear), Adam Keech (@smadam813)"
'''
@@ -91,7 +106,7 @@
# Install notepadplusplus version 6.6
win_chocolatey:
name: notepadplusplus.install
- version: 6.6
+ version: '6.6'
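+
+# Install notepadplusplus and pass arguments through to the native installer
+# (the install_args value here is illustrative, not an official installer flag)
+ win_chocolatey:
+ name: notepadplusplus.install
+ install_args: '/NoDesktopShortcut'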
# Uninstall git
win_chocolatey:
diff --git a/windows/win_dotnet_ngen.py b/windows/win_dotnet_ngen.py
index 75ce9cc138b..9fb7e44e016 100644
--- a/windows/win_dotnet_ngen.py
+++ b/windows/win_dotnet_ngen.py
@@ -21,6 +21,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_dotnet_ngen
diff --git a/windows/win_environment.ps1 b/windows/win_environment.ps1
index 1398524cfbb..f1acfe19356 100644
--- a/windows/win_environment.ps1
+++ b/windows/win_environment.ps1
@@ -20,36 +20,18 @@
# POWERSHELL_COMMON
$params = Parse-Args $args;
-$result = New-Object PSObject;
-Set-Attr $result "changed" $false;
+$state = Get-AnsibleParam -obj $params -name "state" -default "present" -validateSet "present","absent"
+$name = Get-AnsibleParam -obj $params -name "name" -failifempty $true
+$level = Get-AnsibleParam -obj $params -name "level" -validateSet "machine","process","user" -failifempty $true
+$value = Get-AnsibleParam -obj $params -name "value"
-If ($params.state) {
- $state = $params.state.ToString().ToLower()
- If (($state -ne 'present') -and ($state -ne 'absent') ) {
- Fail-Json $result "state is '$state'; must be 'present', or 'absent'"
- }
-} else {
- $state = 'present'
-}
-
-If ($params.name)
-{
- $name = $params.name
-} else {
- Fail-Json $result "missing required argument: name"
-}
-
-$value = $params.value
-
-If ($params.level) {
- $level = $params.level.ToString().ToLower()
- If (( $level -ne 'machine') -and ( $level -ne 'user' ) -and ( $level -ne 'process')) {
- Fail-Json $result "level is '$level'; must be 'machine', 'user', or 'process'"
- }
+If ($level) {
+ $level = $level.ToString().ToLower()
}
$before_value = [Environment]::GetEnvironmentVariable($name, $level)
+$state = $state.ToString().ToLower()
if ($state -eq "present" ) {
[Environment]::SetEnvironmentVariable($name, $value, $level)
} Elseif ($state -eq "absent") {
@@ -58,6 +40,8 @@ if ($state -eq "present" ) {
$after_value = [Environment]::GetEnvironmentVariable($name, $level)
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
Set-Attr $result "name" $name;
Set-Attr $result "before_value" $before_value;
Set-Attr $result "value" $after_value;
diff --git a/windows/win_environment.py b/windows/win_environment.py
index 8d4a1701695..f66771a758d 100644
--- a/windows/win_environment.py
+++ b/windows/win_environment.py
@@ -21,15 +21,18 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_environment
version_added: "2.0"
-short_description: Modifies environment variables on windows guests
+short_description: Modifies environment variables on Windows hosts.
description:
- - Uses .net Environment to set or remove environment variables.
- - Can set at User, Machine or Process level.
- - Note that usual rules apply, so existing environments will not change until new processes are started.
+ - Uses .NET Environment to set or remove environment variables and can set at User, Machine or Process level.
+ - User level environment variables will be set, but not available until the user has logged off and on again.
options:
state:
description:
@@ -62,6 +65,13 @@
- process
- user
author: "Jon Hawkesworth (@jhawkesworth)"
+notes:
+ - This module does not broadcast change events.
+ This means that the minority of Windows applications which can have
+ their environment changed without restarting will not be notified and
+ therefore will need restarting to pick up new environment settings.
+ User level environment variables will require the user to log out
+ and in again before they become available.
'''
EXAMPLES = '''
diff --git a/windows/win_file_version.ps1 b/windows/win_file_version.ps1
new file mode 100644
index 00000000000..2e2f341c461
--- /dev/null
+++ b/windows/win_file_version.ps1
@@ -0,0 +1,78 @@
+#!powershell
+
+#this file is part of Ansible
+#Copyright © 2015 Sam Liu
+
+#This program is free software: you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation, either version 3 of the License, or
+#(at your option) any later version.
+
+#This program is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+
+#You should have received a copy of the GNU General Public License
+#along with this program. If not, see .
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+$result = New-Object psobject @{
+ win_file_version = New-Object psobject
+ changed = $false
+}
+
+$path = Get-AnsibleParam $params "path" -failifempty $true -resultobj $result
+
+If (-Not (Test-Path -Path $path -PathType Leaf)){
+ Fail-Json $result "Specfied path $path does exist or is not a file."
+}
+$ext = [System.IO.Path]::GetExtension($path)
+If ( $ext -notin '.exe', '.dll'){
+ Fail-Json $result "Specfied path $path is not a vaild file type; must be DLL or EXE."
+}
+
+Try {
+ $_version_fields = [System.Diagnostics.FileVersionInfo]::GetVersionInfo($path)
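+ # each property may be $null if the file has no version resource; fall back to empty strings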
+ $file_version = $_version_fields.FileVersion
+ If ($file_version -eq $null){
+ $file_version = ''
+ }
+ $product_version = $_version_fields.ProductVersion
+ If ($product_version -eq $null){
+ $product_version= ''
+ }
+ $file_major_part = $_version_fields.FileMajorPart
+ If ($file_major_part -eq $null){
+ $file_major_part= ''
+ }
+ $file_minor_part = $_version_fields.FileMinorPart
+ If ($file_minor_part -eq $null){
+ $file_minor_part= ''
+ }
+ $file_build_part = $_version_fields.FileBuildPart
+ If ($file_build_part -eq $null){
+ $file_build_part = ''
+ }
+ $file_private_part = $_version_fields.FilePrivatePart
+ If ($file_private_part -eq $null){
+ $file_private_part = ''
+ }
+}
+Catch{
+ Fail-Json $result "Error: $_.Exception.Message"
+}
+
+Set-Attr $result.win_file_version "path" $path.toString()
+Set-Attr $result.win_file_version "file_version" $file_version.toString()
+Set-Attr $result.win_file_version "product_version" $product_version.toString()
+Set-Attr $result.win_file_version "file_major_part" $file_major_part.toString()
+Set-Attr $result.win_file_version "file_minor_part" $file_minor_part.toString()
+Set-Attr $result.win_file_version "file_build_part" $file_build_part.toString()
+Set-Attr $result.win_file_version "file_private_part" $file_private_part.toString()
+Exit-Json $result;
+
diff --git a/windows/win_file_version.py b/windows/win_file_version.py
new file mode 100644
index 00000000000..f882a4439de
--- /dev/null
+++ b/windows/win_file_version.py
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Get DLL or EXE build version
+# Copyright © 2015 Sam Liu
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: win_file_version
+version_added: "2.1"
+short_description: Get DLL or EXE file build version
+description:
+ - Get DLL or EXE file build version
+ - The module always returns C(changed) as false
+options:
+ path:
+ description:
+ - File to get the version of (provide an absolute path)
+ required: true
+ aliases: []
+author: Sam Liu
+'''
+
+EXAMPLES = '''
+# get C:\Windows\System32\cmd.exe version in playbook
+---
+- name: Get cmd.exe file version
+ win_file_version:
+ path: 'C:\Windows\System32\cmd.exe'
+ register: exe_file_version
+
+- debug:
+ msg: '{{ exe_file_version }}'
+
+'''
+
+RETURN = """
+win_file_version.path:
+ description: file path
+ returned: always
+ type: string
+
+win_file_version.file_version:
+ description: file version number.
+ returned: no error
+ type: string
+
+win_file_version.product_version:
+ description: the version of the product this file is distributed with.
+ returned: no error
+ type: string
+
+win_file_version.file_major_part:
+ description: the major part of the version number.
+ returned: no error
+ type: string
+
+win_file_version.file_minor_part:
+ description: the minor part of the version number of the file.
+ returned: no error
+ type: string
+
+win_file_version.file_build_part:
+ description: build number of the file.
+ returned: no error
+ type: string
+
+win_file_version.file_private_part:
+ description: file private part number.
+ returned: no error
+ type: string
+
+"""
diff --git a/windows/win_firewall_rule.ps1 b/windows/win_firewall_rule.ps1
index 223d8b17b69..a63cedec0c1 100644
--- a/windows/win_firewall_rule.ps1
+++ b/windows/win_firewall_rule.ps1
@@ -22,9 +22,9 @@
function getFirewallRule ($fwsettings) {
try {
-
- #$output = Get-NetFirewallRule -name $($fwsettings.name);
- $rawoutput=@(netsh advfirewall firewall show rule name=$($fwsettings.Name))
+
+ #$output = Get-NetFirewallRule -name $($fwsettings.'Rule Name');
+ $rawoutput=@(netsh advfirewall firewall show rule name="$($fwsettings.'Rule Name')" verbose)
if (!($rawoutput -eq 'No rules match the specified criteria.')){
$rawoutput | Where {$_ -match '^([^:]+):\s*(\S.*)$'} | Foreach -Begin {
$FirstRun = $true;
@@ -51,10 +51,10 @@ function getFirewallRule ($fwsettings) {
$msg=@();
if ($($output|measure).count -gt 0) {
$exists=$true;
- $msg += @("The rule '" + $fwsettings.name + "' exists.");
+ $msg += @("The rule '" + $fwsettings.'Rule Name' + "' exists.");
if ($($output|measure).count -gt 1) {
$multi=$true
- $msg += @("The rule '" + $fwsettings.name + "' has multiple entries.");
+ $msg += @("The rule '" + $fwsettings.'Rule Name' + "' has multiple entries.");
ForEach($rule in $output.GetEnumerator()) {
ForEach($fwsetting in $fwsettings.GetEnumerator()) {
if ( $rule.$fwsetting -ne $fwsettings.$fwsetting) {
@@ -73,7 +73,7 @@ function getFirewallRule ($fwsettings) {
if (($fwsetting.Key -eq 'RemoteIP') -and ($output.$($fwsetting.Key) -eq ($fwsettings.$($fwsetting.Key)+'-'+$fwsettings.$($fwsetting.Key)))) {
$donothing=$false
- } elseif ((($fwsetting.Key -eq 'Name') -or ($fwsetting.Key -eq 'DisplayName')) -and ($output."Rule Name" -eq $fwsettings.$($fwsetting.Key))) {
+ } elseif (($fwsetting.Key -eq 'DisplayName') -and ($output."Rule Name" -eq $fwsettings.$($fwsetting.Key))) {
$donothing=$false
} else {
$diff=$true;
@@ -94,6 +94,7 @@ function getFirewallRule ($fwsettings) {
$msg += @("No rule could be found");
};
$result = @{
+ failed = $false
exists = $exists
identical = $correct
multiple = $multi
@@ -112,19 +113,26 @@ function getFirewallRule ($fwsettings) {
function createFireWallRule ($fwsettings) {
$msg=@()
- $execString="netsh advfirewall firewall add rule "
+ $execString="netsh advfirewall firewall add rule"
ForEach ($fwsetting in $fwsettings.GetEnumerator()) {
if ($fwsetting.key -eq 'Direction') {
$key='dir'
+ } elseif ($fwsetting.key -eq 'Rule Name') {
+ $key='name'
+ } elseif ($fwsetting.key -eq 'Enabled') {
+ $key='enable'
+ } elseif ($fwsetting.key -eq 'Profiles') {
+ $key='profile'
} else {
$key=$($fwsetting.key).ToLower()
};
$execString+=" ";
$execString+=$key;
$execString+="=";
+ $execString+='"';
$execString+=$fwsetting.value;
- #$execString+="'";
+ $execString+='"';
};
try {
#$msg+=@($execString);
@@ -132,6 +140,7 @@ function createFireWallRule ($fwsettings) {
$msg+=@("Created firewall rule $name");
$result=@{
+ failed = $false
output=$output
changed=$true
msg=$msg
@@ -152,7 +161,7 @@ function createFireWallRule ($fwsettings) {
function removeFireWallRule ($fwsettings) {
$msg=@()
try {
- $rawoutput=@(netsh advfirewall firewall delete rule name=$($fwsettings.name))
+ $rawoutput=@(netsh advfirewall firewall delete rule name="$($fwsettings.'Rule Name')")
$rawoutput | Where {$_ -match '^([^:]+):\s*(\S.*)$'} | Foreach -Begin {
$FirstRun = $true;
$HashProps = @{};
@@ -193,72 +202,54 @@ $fwsettings=@{}
# Parse the supplied arguments into variables
$params=Parse-Args $args;
-$state=Get-Attr $params "state" "present";
-$name=Get-Attr $params "name" "";
-$direction=Get-Attr $params "direction" "";
-$force=Get-Attr $params "force" $false;
-$action=Get-Attr $params "action" "";
+$name = Get-AnsibleParam -obj $params -name "name" -failifempty $true
+$direction = Get-AnsibleParam -obj $params -name "direction" -failifempty $true -validateSet "in","out"
+$action = Get-AnsibleParam -obj $params -name "action" -failifempty $true -validateSet "allow","block","bypass"
+$program = Get-AnsibleParam -obj $params -name "program"
+$service = Get-AnsibleParam -obj $params -name "service" -default "any"
+$description = Get-AnsibleParam -obj $params -name "description"
+$enable = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "enable" -default "true")
+$winprofile = Get-AnsibleParam -obj $params -name "profile" -default "any"
+$localip = Get-AnsibleParam -obj $params -name "localip" -default "any"
+$remoteip = Get-AnsibleParam -obj $params -name "remoteip" -default "any"
+$localport = Get-AnsibleParam -obj $params -name "localport" -default "any"
+$remoteport = Get-AnsibleParam -obj $params -name "remoteport" -default "any"
+$protocol = Get-AnsibleParam -obj $params -name "protocol" -default "any"
+
+$state = Get-AnsibleParam -obj $params -name "state" -failifempty $true -validateSet "present","absent"
+$force = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "force" -default "false")
# Check the arguments
-if (($state -ne "present") -And ($state -ne "absent")){
- $misArg+="state";
- $msg+=@("for the state parameter only present and absent is allowed");
+If ($enable -eq $true) {
+ $fwsettings.Add("Enabled", "yes");
+} Else {
+ $fwsettings.Add("Enabled", "no");
};
-if ($name -eq ""){
- $misArg+="Name";
- $msg+=@("name is a required argument");
-} else {
- $fwsettings.Add("Name", $name)
- #$fwsettings.Add("displayname", $name)
-};
-if ((($direction.ToLower() -ne "In") -And ($direction.ToLower() -ne "Out")) -And ($state -eq "present")){
- $misArg+="Direction";
- $msg+=@("for the Direction parameter only the values 'In' and 'Out' are allowed");
-} else {
+$fwsettings.Add("Rule Name", $name)
+#$fwsettings.Add("displayname", $name)
+
+$state = $state.ToString().ToLower()
+If ($state -eq "present"){
$fwsettings.Add("Direction", $direction)
-};
-if ((($action.ToLower() -ne "allow") -And ($action.ToLower() -ne "block")) -And ($state -eq "present")){
- $misArg+="Action";
- $msg+=@("for the Action parameter only the values 'allow' and 'block' are allowed");
-} else {
$fwsettings.Add("Action", $action)
};
-$args=@(
- "Description",
- "LocalIP",
- "RemoteIP",
- "LocalPort",
- "RemotePort",
- "Program",
- "Service",
- "Protocol"
-)
-foreach ($arg in $args){
- New-Variable -Name $arg -Value $(Get-Attr $params $arg "");
- if ((Get-Variable -Name $arg -ValueOnly) -ne ""){
- $fwsettings.Add($arg, $(Get-Variable -Name $arg -ValueOnly));
- };
-};
-
-$winprofile=Get-Attr $params "profile" "current";
-if (($winprofile -ne 'current') -or ($winprofile -ne 'domain') -or ($winprofile -ne 'standard') -or ($winprofile -ne 'all') ) {
- $misArg+="Profile";
- $msg+=@("for the Profile parameter only the values 'current', 'domain', 'standard' or 'all' are allowed");
-} else {
+If ($description) {
+ $fwsettings.Add("Description", $description);
+}
- $fwsettings.Add("profile", $winprofile)
+If ($program) {
+ $fwsettings.Add("Program", $program);
}
-if ($($($misArg|measure).count) -gt 0){
- $result=New-Object psobject @{
- changed=$false
- failed=$true
- msg=$msg
- };
- Exit-Json($result);
-};
+$fwsettings.Add("LocalIP", $localip);
+$fwsettings.Add("RemoteIP", $remoteip);
+$fwsettings.Add("LocalPort", $localport);
+$fwsettings.Add("RemotePort", $remoteport);
+$fwsettings.Add("Service", $service);
+$fwsettings.Add("Protocol", $protocol);
+$fwsettings.Add("Profiles", $winprofile)
$output=@()
$capture=getFirewallRule ($fwsettings);
@@ -279,7 +270,7 @@ if ($capture.failed -eq $true) {
}
-switch ($state.ToLower()){
+switch ($state){
"present" {
if ($capture.exists -eq $false) {
$capture=createFireWallRule($fwsettings);
@@ -297,7 +288,7 @@ switch ($state.ToLower()){
};
Exit-Json $result;
}
- } elseif ($capture.identical -eq $false) {
+ } elseif ($capture.identical -eq $false) {
if ($force -eq $true) {
$capture=removeFirewallRule($fwsettings);
$msg+=$capture.msg;
diff --git a/windows/win_firewall_rule.py b/windows/win_firewall_rule.py
index 295979b248f..1a5c699f795 100644
--- a/windows/win_firewall_rule.py
+++ b/windows/win_firewall_rule.py
@@ -17,18 +17,27 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
-module: win_fw
+module: win_firewall_rule
version_added: "2.0"
author: Timothy Vandenbrande
short_description: Windows firewall automation
description:
- allows you to create/remove/update firewall rules
-options:
+options:
+ enable:
+ description:
+ - is this firewall rule enabled or disabled
+ default: true
+ required: false
state:
description:
- - create/remove/update or powermanage your VM
+ - should this rule be added or removed
default: "present"
required: true
choices: ['present', 'absent']
@@ -42,13 +51,13 @@
- is this rule for inbound or outbound trafic
default: null
required: true
- choices: [ 'In', 'Out' ]
+ choices: ['in', 'out']
action:
description:
- what to do with the items this rule is for
default: null
required: true
- choices: [ 'allow', 'block' ]
+ choices: ['allow', 'block', 'bypass']
description:
description:
- description for the firewall rule
@@ -57,22 +66,22 @@
localip:
description:
- the local ip address this rule applies to
- default: null
+ default: 'any'
required: false
remoteip:
description:
- the remote ip address/range this rule applies to
- default: null
+ default: 'any'
required: false
localport:
description:
- the local port this rule applies to
- default: null
+ default: 'any'
required: false
remoteport:
description:
- the remote port this rule applies to
- default: null
+ default: 'any'
required: false
program:
description:
@@ -82,35 +91,37 @@
service:
description:
- the service this rule applies to
- default: null
+ default: 'any'
required: false
protocol:
description:
- the protocol this rule applies to
- default: null
+ default: 'any'
required: false
profile:
- describtion:
- - the profile this rule applies to
- default: current
- choices: ['current', 'domain', 'standard', 'all']
+ description:
+ - the profile this rule applies to, e.g. Domain,Private,Public
+ default: 'any'
+ required: false
force:
description:
- Enforces the change if a rule with different values exists
default: false
required: false
-
+
'''
EXAMPLES = '''
-# create smtp firewall rule
- action: win_fw
+- name: Firewall rule to allow smtp on TCP port 25
+ action: win_firewall_rule
args:
name: smtp
+ enable: yes
state: present
localport: 25
action: allow
+ direction: in
protocol: TCP
'''
diff --git a/windows/win_iis_virtualdirectory.ps1 b/windows/win_iis_virtualdirectory.ps1
index 3f2ab692b42..44854ff09b4 100644
--- a/windows/win_iis_virtualdirectory.ps1
+++ b/windows/win_iis_virtualdirectory.ps1
@@ -66,7 +66,11 @@ $directory_path = if($application) {
}
# Directory info
-$directory = Get-WebVirtualDirectory -Site $site -Name $name
+$directory = if($application) {
+ Get-WebVirtualDirectory -Site $site -Name $name -Application $application
+} else {
+ Get-WebVirtualDirectory -Site $site -Name $name
+}
try {
# Add directory
diff --git a/windows/win_iis_virtualdirectory.py b/windows/win_iis_virtualdirectory.py
index 1ccb34a65d3..9388cb9d6be 100644
--- a/windows/win_iis_virtualdirectory.py
+++ b/windows/win_iis_virtualdirectory.py
@@ -18,13 +18,17 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_iis_virtualdirectory
version_added: "2.0"
-short_description: Configures a IIS virtual directories.
+short_description: Configures a virtual directory in IIS.
description:
- - Creates, Removes and configures a IIS Web site
+ - Creates, Removes and configures a virtual directory in IIS.
options:
name:
description:
@@ -37,12 +41,11 @@
- absent
- present
required: false
- default: null
+ default: present
site:
description:
- The site name under which the virtual directory is created or exists.
- required: false
- default: null
+ required: true
application:
description:
- The application under which the virtual directory is created or exists.
@@ -55,3 +58,14 @@
default: null
author: Henrik Wallström
'''
+
+EXAMPLES = '''
+# This creates a virtual directory if it doesn't exist.
+$ ansible -i hosts -m win_iis_virtualdirectory -a "name='somedirectory' site=somesite state=present physical_path=c:\\virtualdirectory\\some" host
+
+# This removes a virtual directory if it exists.
+$ ansible -i hosts -m win_iis_virtualdirectory -a "name='somedirectory' site=somesite state=absent" host
+
+# This creates a virtual directory on an application if it doesn't exist.
+$ ansible -i hosts -m win_iis_virtualdirectory -a "name='somedirectory' site=somesite application=someapp state=present physical_path=c:\\virtualdirectory\\some" host
+'''
diff --git a/windows/win_iis_webapplication.py b/windows/win_iis_webapplication.py
index b8ebd085162..26177eb90b2 100644
--- a/windows/win_iis_webapplication.py
+++ b/windows/win_iis_webapplication.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_iis_webapplication
diff --git a/windows/win_iis_webapppool.ps1 b/windows/win_iis_webapppool.ps1
index 2ed369e4a3f..4172dc2f336 100644
--- a/windows/win_iis_webapppool.ps1
+++ b/windows/win_iis_webapppool.ps1
@@ -39,7 +39,7 @@ If (($state -Ne $FALSE) -And ($state -NotIn $valid_states)) {
# Attributes parameter - Pipe separated list of attributes where
# keys and values are separated by a colon (paramA:valueA|paramB:valueB)
$attributes = @{};
-If ($params.attributes) {
+If (Get-Member -InputObject $params -Name attributes) {
$params.attributes -split '\|' | foreach {
$key, $value = $_ -split "\:";
$attributes.Add($key, $value);
@@ -90,10 +90,18 @@ try {
Stop-WebAppPool -Name $name -ErrorAction Stop
$result.changed = $TRUE
}
- if ((($state -eq 'started') -and ($pool.State -eq 'Stopped')) -or ($state -eq 'restarted')) {
+ if ((($state -eq 'started') -and ($pool.State -eq 'Stopped'))) {
Start-WebAppPool -Name $name -ErrorAction Stop
$result.changed = $TRUE
}
+ if ($state -eq 'restarted') {
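+ # a stopped pool cannot be restarted directly, so start it instead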
+ switch ($pool.State)
+ {
+ 'Stopped' { Start-WebAppPool -Name $name -ErrorAction Stop }
+ default { Restart-WebAppPool -Name $name -ErrorAction Stop }
+ }
+ $result.changed = $TRUE
+ }
}
} catch {
Fail-Json $result $_.Exception.Message
@@ -101,12 +109,15 @@ try {
# Result
$pool = Get-Item IIS:\AppPools\$name
-$result.info = @{
- name = $pool.Name
- state = $pool.State
- attributes = New-Object psobject @{}
-};
-
-$pool.Attributes | ForEach { $result.info.attributes.Add($_.Name, $_.Value)};
+if ($pool)
+{
+ $result.info = @{
+ name = $pool.Name
+ state = $pool.State
+ attributes = New-Object psobject @{}
+ };
+
+ $pool.Attributes | ForEach { $result.info.attributes.Add($_.Name, $_.Value)};
+}
-Exit-Json $result
+Exit-Json $result
\ No newline at end of file
diff --git a/windows/win_iis_webapppool.py b/windows/win_iis_webapppool.py
index c77c3b04cb7..e2cb8778b5f 100644
--- a/windows/win_iis_webapppool.py
+++ b/windows/win_iis_webapppool.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_iis_webapppool
diff --git a/windows/win_iis_webbinding.ps1 b/windows/win_iis_webbinding.ps1
index bdff43fc63c..dfd9cdb958b 100644
--- a/windows/win_iis_webbinding.ps1
+++ b/windows/win_iis_webbinding.ps1
@@ -23,42 +23,35 @@
$params = Parse-Args $args;
-# Name parameter
-$name = Get-Attr $params "name" $FALSE;
-If ($name -eq $FALSE) {
- Fail-Json (New-Object psobject) "missing required argument: name";
-}
-
-# State parameter
-$state = Get-Attr $params "state" $FALSE;
-$valid_states = ($FALSE, 'present', 'absent');
-If ($state -NotIn $valid_states) {
- Fail-Json $result "state is '$state'; must be $($valid_states)"
-}
+$name = Get-AnsibleParam $params -name "name" -failifempty $true
+$state = Get-AnsibleParam $params "state" -default "present" -validateSet "present","absent"
+$host_header = Get-AnsibleParam $params -name "host_header"
+$protocol = Get-AnsibleParam $params -name "protocol"
+$port = Get-AnsibleParam $params -name "port"
+$ip = Get-AnsibleParam $params -name "ip"
+$certificatehash = Get-AnsibleParam $params -name "certificate_hash" -default $false
+$certificateStoreName = Get-AnsibleParam $params -name "certificate_store_name" -default "MY"
$binding_parameters = New-Object psobject @{
Name = $name
};
-If ($params.host_header) {
- $binding_parameters.HostHeader = $params.host_header
+If ($host_header) {
+ $binding_parameters.HostHeader = $host_header
}
-If ($params.protocol) {
- $binding_parameters.Protocol = $params.protocol
+If ($protocol) {
+ $binding_parameters.Protocol = $protocol
}
-If ($params.port) {
- $binding_parameters.Port = $params.port
+If ($port) {
+ $binding_parameters.Port = $port
}
-If ($params.ip) {
- $binding_parameters.IPAddress = $params.ip
+If ($ip) {
+ $binding_parameters.IPAddress = $ip
}
-$certificateHash = Get-Attr $params "certificate_hash" $FALSE;
-$certificateStoreName = Get-Attr $params "certificate_store_name" "MY";
-
# Ensure WebAdministration module is loaded
if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null){
Import-Module WebAdministration
@@ -98,12 +91,12 @@ try {
# Select certificate
if($certificateHash -ne $FALSE) {
- $ip = $binding_parameters.IPAddress
+ $ip = $binding_parameters["IPAddress"]
if((!$ip) -or ($ip -eq "*")) {
$ip = "0.0.0.0"
}
- $port = $binding_parameters.Port
+ $port = $binding_parameters["Port"]
if(!$port) {
$port = 443
}
diff --git a/windows/win_iis_webbinding.py b/windows/win_iis_webbinding.py
index 061bed73723..c7a08628f48 100644
--- a/windows/win_iis_webbinding.py
+++ b/windows/win_iis_webbinding.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_iis_webbinding
@@ -66,12 +70,6 @@
required: false
default: null
aliases: []
- protocol:
- description:
- - The protocol to be used for the Web binding (usually HTTP, HTTPS, or FTP).
- required: false
- default: null
- aliases: []
certificate_hash:
description:
- Certificate hash for the SSL binding. The certificate hash is the unique identifier for the certificate.
diff --git a/windows/win_iis_website.ps1 b/windows/win_iis_website.ps1
index 26a8df12730..74fc3df3026 100644
--- a/windows/win_iis_website.ps1
+++ b/windows/win_iis_website.ps1
@@ -37,6 +37,7 @@ If (($state -ne $FALSE) -and ($state -ne 'started') -and ($state -ne 'stopped')
# Path parameter
$physical_path = Get-Attr $params "physical_path" $FALSE;
+$site_id = Get-Attr $params "site_id" $FALSE;
# Application Pool Parameter
$application_pool = Get-Attr $params "application_pool" $FALSE;
@@ -91,6 +92,10 @@ Try {
$site_parameters.ApplicationPool = $application_pool
}
+ If ($site_id) {
+ $site_parameters.ID = $site_id
+ }
+
If ($bind_port) {
$site_parameters.Port = $bind_port
}
@@ -103,6 +108,12 @@ Try {
$site_parameters.HostHeader = $bind_hostname
}
+ # Fix for error "New-Item : Index was outside the bounds of the array."
+ # This is a bug in the New-WebSite cmdlet. Apparently there must be at least one site configured in IIS, otherwise New-WebSite crashes.
+ # For more details, see http://stackoverflow.com/questions/3573889/ps-c-new-website-blah-throws-index-was-outside-the-bounds-of-the-array
+ $sites_list = get-childitem -Path IIS:\sites
+ if ($sites_list -eq $null) { $site_parameters.ID = 1 }
+
$site = New-Website @site_parameters -Force
$result.changed = $true
}
@@ -165,15 +176,21 @@ Catch
Fail-Json (New-Object psobject) $_.Exception.Message
}
-$site = Get-Website | Where { $_.Name -eq $name }
-$result.site = New-Object psobject @{
- Name = $site.Name
- ID = $site.ID
- State = $site.State
- PhysicalPath = $site.PhysicalPath
- ApplicationPool = $site.applicationPool
- Bindings = @($site.Bindings.Collection | ForEach-Object { $_.BindingInformation })
+if ($state -ne 'absent')
+{
+ $site = Get-Website | Where { $_.Name -eq $name }
}
+if ($site)
+{
+ $result.site = New-Object psobject @{
+ Name = $site.Name
+ ID = $site.ID
+ State = $site.State
+ PhysicalPath = $site.PhysicalPath
+ ApplicationPool = $site.applicationPool
+ Bindings = @($site.Bindings.Collection | ForEach-Object { $_.BindingInformation })
+ }
+}
Exit-Json $result
diff --git a/windows/win_iis_website.py b/windows/win_iis_website.py
index 8921afe5970..9c65c067c95 100644
--- a/windows/win_iis_website.py
+++ b/windows/win_iis_website.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_iis_website
@@ -32,6 +36,12 @@
required: true
default: null
aliases: []
+ site_id:
+ description:
+ - Explicitly set the IIS numeric ID for a site. Note that this value cannot be changed after the website has been created.
+ required: false
+ version_added: "2.1"
+ default: null
state:
description:
- State of the web site
diff --git a/windows/win_nssm.ps1 b/windows/win_nssm.ps1
index 841bc3aa3fd..da3d01a7161 100644
--- a/windows/win_nssm.ps1
+++ b/windows/win_nssm.ps1
@@ -2,6 +2,8 @@
# This file is part of Ansible
#
# Copyright 2015, George Frank
+# Copyright 2015, Adam Keech
+# Copyright 2015, Hans-Joachim Kliemeck
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,68 +24,49 @@ $ErrorActionPreference = "Stop"
# POWERSHELL_COMMON
$params = Parse-Args $args;
+
$result = New-Object PSObject;
Set-Attr $result "changed" $false;
+$name = Get-Attr $params "name" -failifempty $true
+$state = Get-Attr $params "state" -default "present" -validateSet "present", "absent", "started", "stopped", "restarted" -resultobj $result
-If ($params.name)
-{
- $name = $params.name
-}
-Else
-{
- Fail-Json $result "missing required argument: name"
-}
+$application = Get-Attr $params "application" -default $null
+$appParameters = Get-Attr $params "app_parameters" -default $null
+$startMode = Get-Attr $params "start_mode" -default "auto" -validateSet "auto", "manual", "disabled" -resultobj $result
-If ($params.state)
-{
- $state = $params.state.ToString().ToLower()
- $validStates = "present", "absent", "started", "stopped", "restarted"
-
- If ($validStates -notcontains $state)
- {
- Fail-Json $result "state is $state; must be one of: $validStates"
- }
-}
-else
-{
- $state = "present"
-}
+$stdoutFile = Get-Attr $params "stdout_file" -default $null
+$stderrFile = Get-Attr $params "stderr_file" -default $null
+$dependencies = Get-Attr $params "dependencies" -default $null
-If ($params.application)
-{
- $application = $params.application
-}
-Else
-{
- $application = $null
-}
+$user = Get-Attr $params "user" -default $null
+$password = Get-Attr $params "password" -default $null
-If ($params.app_parameters)
-{
- $appParameters = $params.app_parameters
-}
-Else
-{
- $appParameters = $null
-}
-If ($params.stdout_file)
+# abstract the calling of nssm because some PowerShell environments
+# mishandle its stdout (which is Unicode) as UTF8
+Function Nssm-Invoke
{
- $stdoutFile = $params.stdout_file
-}
-Else
-{
- $stdoutFile = $null
-}
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$cmd
+ )
+ Try {
+ $encodingWas = [System.Console]::OutputEncoding
+ [System.Console]::OutputEncoding = [System.Text.Encoding]::Unicode
-If ($params.stderr_file)
-{
- $stderrFile = $params.stderr_file
-}
-Else
-{
- $stderrFile = $null
+ $nssmOutput = invoke-expression "nssm $cmd"
+ return $nssmOutput
+ }
+ Catch {
+ $ErrorMessage = $_.Exception.Message
+ Fail-Json $result "an exception occurred when invoking NSSM: $ErrorMessage"
+ }
+ Finally {
+ # Set the console encoding back to what it was
+ [System.Console]::OutputEncoding = $encodingWas
+ }
}
Function Service-Exists
@@ -107,11 +90,11 @@ Function Nssm-Remove
if (Service-Exists -name $name)
{
- $cmd = "nssm stop ""$name"""
- $results = invoke-expression $cmd
+ $cmd = "stop ""$name"""
+ $results = Nssm-Invoke $cmd
- $cmd = "nssm remove ""$name"" confirm"
- $results = invoke-expression $cmd
+ $cmd = "remove ""$name"" confirm"
+ $results = Nssm-Invoke $cmd
if ($LastExitCode -ne 0)
{
@@ -120,6 +103,7 @@ Function Nssm-Remove
Throw "Error removing service ""$name"""
}
+ Set-Attr $result "changed_by" "remove_service"
$result.changed = $true
}
}
@@ -131,6 +115,7 @@ Function Nssm-Install
[Parameter(Mandatory=$true)]
[string]$name,
[Parameter(Mandatory=$true)]
+ [AllowEmptyString()]
[string]$application
)
@@ -138,12 +123,13 @@ Function Nssm-Install
{
Throw "Error installing service ""$name"". No application was supplied."
}
+ If (-Not (Test-Path -Path $application -PathType Leaf)) {
+ Throw "$application does not exist on the host"
+ }
if (!(Service-Exists -name $name))
{
- $cmd = "nssm install ""$name"" $application"
-
- $results = invoke-expression $cmd
+ $results = Nssm-Invoke "install ""$name"" $application"
if ($LastExitCode -ne 0)
{
@@ -152,11 +138,11 @@ Function Nssm-Install
Throw "Error installing service ""$name"""
}
+ Set-Attr $result "changed_by" "install_service"
$result.changed = $true
} else {
- $cmd = "nssm get ""$name"" Application"
- $results = invoke-expression $cmd
+ $results = Nssm-Invoke "get ""$name"" Application"
if ($LastExitCode -ne 0)
{
@@ -165,11 +151,11 @@ Function Nssm-Install
Throw "Error installing service ""$name"""
}
- if ($results -ne $application)
+ if ($results -cnotlike $application)
{
- $cmd = "nssm set ""$name"" Application $application"
+ $cmd = "set ""$name"" Application $application"
- $results = invoke-expression $cmd
+ $results = Nssm-Invoke $cmd
if ($LastExitCode -ne 0)
{
@@ -177,10 +163,27 @@ Function Nssm-Install
Set-Attr $result "nssm_error_log" "$results"
Throw "Error installing service ""$name"""
}
+ Set-Attr $result "application" "$application"
+ Set-Attr $result "changed_by" "reinstall_service"
$result.changed = $true
}
}
+
+ if ($result.changed)
+ {
+ $applicationPath = (Get-Item $application).DirectoryName
+ $cmd = "nssm set ""$name"" AppDirectory $applicationPath"
+
+ $results = invoke-expression $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error installing service ""$name"""
+ }
+ }
}
Function ParseAppParameters()
@@ -188,10 +191,13 @@ Function ParseAppParameters()
[CmdletBinding()]
param(
[Parameter(Mandatory=$true)]
+ [AllowEmptyString()]
[string]$appParameters
)
- return ConvertFrom-StringData -StringData $appParameters.TrimStart("@").TrimStart("{").TrimEnd("}").Replace("; ","`n")
+ $escapedAppParameters = $appParameters.TrimStart("@").TrimStart("{").TrimEnd("}").Replace("; ","`n").Replace("\","\\")
+
+ return ConvertFrom-StringData -StringData $escapedAppParameters
}
@@ -202,11 +208,12 @@ Function Nssm-Update-AppParameters
[Parameter(Mandatory=$true)]
[string]$name,
[Parameter(Mandatory=$true)]
+ [AllowEmptyString()]
[string]$appParameters
)
- $cmd = "nssm get ""$name"" AppParameters"
- $results = invoke-expression $cmd
+ $cmd = "get ""$name"" AppParameters"
+ $results = Nssm-Invoke $cmd
if ($LastExitCode -ne 0)
{
@@ -215,36 +222,45 @@ Function Nssm-Update-AppParameters
Throw "Error updating AppParameters for service ""$name"""
}
- $appParametersHash = ParseAppParameters -appParameters $appParameters
-
$appParamKeys = @()
$appParamVals = @()
$singleLineParams = ""
- $appParametersHash.GetEnumerator() |
- % {
- $key = $($_.Name)
- $val = $($_.Value)
-
- $appParamKeys += $key
- $appParamVals += $val
-
- if ($key -eq "_") {
- $singleLineParams = "$val " + $singleLineParams
- } else {
- $singleLineParams = $singleLineParams + "$key ""$val"""
+
+ if ($appParameters)
+ {
+ $appParametersHash = ParseAppParameters -appParameters $appParameters
+ $appParametersHash.GetEnumerator() |
+ % {
+ $key = $($_.Name)
+ $val = $($_.Value)
+
+ $appParamKeys += $key
+ $appParamVals += $val
+
+ if ($key -eq "_") {
+ $singleLineParams = "$val " + $singleLineParams
+ } else {
+ $singleLineParams = $singleLineParams + "$key ""$val"""
+ }
}
- }
+
+ Set-Attr $result "nssm_app_parameters_parsed" $appParametersHash
+ Set-Attr $result "nssm_app_parameters_keys" $appParamKeys
+ Set-Attr $result "nssm_app_parameters_vals" $appParamVals
+ }
Set-Attr $result "nssm_app_parameters" $appParameters
- Set-Attr $result "nssm_app_parameters_parsed" $appParametersHash
- Set-Attr $result "nssm_app_parameters_keys" $appParamKeys
- Set-Attr $result "nssm_app_parameters_vals" $appParamVals
Set-Attr $result "nssm_single_line_app_parameters" $singleLineParams
if ($results -ne $singleLineParams)
{
- $cmd = "nssm set ""$name"" AppParameters $singleLineParams"
- $results = invoke-expression $cmd
+ if ($appParameters)
+ {
+ $cmd = "set ""$name"" AppParameters $singleLineParams"
+ } else {
+ $cmd = "set ""$name"" AppParameters '""""'"
+ }
+ $results = Nssm-Invoke $cmd
if ($LastExitCode -ne 0)
{
@@ -253,11 +269,12 @@ Function Nssm-Update-AppParameters
Throw "Error updating AppParameters for service ""$name"""
}
+ Set-Attr $result "changed_by" "update_app_parameters"
$result.changed = $true
}
}
-Function Nssm-Set-Ouput-Files
+Function Nssm-Set-Output-Files
{
[CmdletBinding()]
param(
@@ -267,8 +284,8 @@ Function Nssm-Set-Ouput-Files
[string]$stderr
)
- $cmd = "nssm get ""$name"" AppStdout"
- $results = invoke-expression $cmd
+ $cmd = "get ""$name"" AppStdout"
+ $results = Nssm-Invoke $cmd
if ($LastExitCode -ne 0)
{
@@ -277,16 +294,16 @@ Function Nssm-Set-Ouput-Files
Throw "Error retrieving existing stdout file for service ""$name"""
}
- if ($results -ne $stdout)
+ if ($results -cnotlike $stdout)
{
if (!$stdout)
{
- $cmd = "nssm reset ""$name"" AppStdout"
+ $cmd = "reset ""$name"" AppStdout"
} else {
- $cmd = "nssm set ""$name"" AppStdout $stdout"
+ $cmd = "set ""$name"" AppStdout $stdout"
}
- $results = invoke-expression $cmd
+ $results = Nssm-Invoke $cmd
if ($LastExitCode -ne 0)
{
@@ -295,11 +312,12 @@ Function Nssm-Set-Ouput-Files
Throw "Error setting stdout file for service ""$name"""
}
+ Set-Attr $result "changed_by" "set_stdout"
$result.changed = $true
}
- $cmd = "nssm get ""$name"" AppStderr"
- $results = invoke-expression $cmd
+ $cmd = "get ""$name"" AppStderr"
+ $results = Nssm-Invoke $cmd
if ($LastExitCode -ne 0)
{
@@ -308,12 +326,12 @@ Function Nssm-Set-Ouput-Files
Throw "Error retrieving existing stderr file for service ""$name"""
}
- if ($results -ne $stderr)
+ if ($results -cnotlike $stderr)
{
if (!$stderr)
{
- $cmd = "nssm reset ""$name"" AppStderr"
- $results = invoke-expression $cmd
+ $cmd = "reset ""$name"" AppStderr"
+ $results = Nssm-Invoke $cmd
if ($LastExitCode -ne 0)
{
@@ -322,8 +340,8 @@ Function Nssm-Set-Ouput-Files
Throw "Error clearing stderr file setting for service ""$name"""
}
} else {
- $cmd = "nssm set ""$name"" AppStderr $stderr"
- $results = invoke-expression $cmd
+ $cmd = "set ""$name"" AppStderr $stderr"
+ $results = Nssm-Invoke $cmd
if ($LastExitCode -ne 0)
{
@@ -333,6 +351,7 @@ Function Nssm-Set-Ouput-Files
}
}
+ Set-Attr $result "changed_by" "set_stderr"
$result.changed = $true
}
@@ -341,28 +360,152 @@ Function Nssm-Set-Ouput-Files
###
#set files to overwrite
- $cmd = "nssm set ""$name"" AppStdoutCreationDisposition 2"
- $results = invoke-expression $cmd
+ $cmd = "set ""$name"" AppStdoutCreationDisposition 2"
+ $results = Nssm-Invoke $cmd
- $cmd = "nssm set ""$name"" AppStderrCreationDisposition 2"
- $results = invoke-expression $cmd
+ $cmd = "set ""$name"" AppStderrCreationDisposition 2"
+ $results = Nssm-Invoke $cmd
#enable file rotation
- $cmd = "nssm set ""$name"" AppRotateFiles 1"
- $results = invoke-expression $cmd
+ $cmd = "set ""$name"" AppRotateFiles 1"
+ $results = Nssm-Invoke $cmd
#don't rotate until the service restarts
- $cmd = "nssm set ""$name"" AppRotateOnline 0"
- $results = invoke-expression $cmd
+ $cmd = "set ""$name"" AppRotateOnline 0"
+ $results = Nssm-Invoke $cmd
#both of the below conditions must be met before rotation will happen
#minimum age before rotating
- $cmd = "nssm set ""$name"" AppRotateSeconds 86400"
- $results = invoke-expression $cmd
+ $cmd = "set ""$name"" AppRotateSeconds 86400"
+ $results = Nssm-Invoke $cmd
#minimum size before rotating
- $cmd = "nssm set ""$name"" AppRotateBytes 104858"
- $results = invoke-expression $cmd
+ $cmd = "set ""$name"" AppRotateBytes 104858"
+ $results = Nssm-Invoke $cmd
+}
+
+Function Nssm-Update-Credentials
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name,
+ [Parameter(Mandatory=$false)]
+ [string]$user,
+ [Parameter(Mandatory=$false)]
+ [string]$password
+ )
+
+ $cmd = "get ""$name"" ObjectName"
+ $results = Nssm-Invoke $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating credentials for service ""$name"""
+ }
+
+ if ($user) {
+ if (!$password) {
+ Throw "User without password is informed for service ""$name"""
+ }
+ else {
+ $fullUser = $user
+ If (-Not($user.contains("@")) -And ($user.Split("\").count -eq 1)) {
+ $fullUser = ".\" + $user
+ }
+
+ If ($results -ne $fullUser) {
+ $cmd = "set ""$name"" ObjectName $fullUser $password"
+ $results = Nssm-Invoke $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating credentials for service ""$name"""
+ }
+
+ Set-Attr $result "changed_by" "update_credentials"
+ $result.changed = $true
+ }
+ }
+ }
+}
+
+Function Nssm-Update-Dependencies
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name,
+ [Parameter(Mandatory=$false)]
+ [string]$dependencies
+ )
+
+ $cmd = "get ""$name"" DependOnService"
+ $results = Nssm-Invoke $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating dependencies for service ""$name"""
+ }
+
+ If (($dependencies) -and ($results.Tolower() -ne $dependencies.Tolower())) {
+ $cmd = "set ""$name"" DependOnService $dependencies"
+ $results = Nssm-Invoke $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating dependencies for service ""$name"""
+ }
+
+ Set-Attr $result "changed_by" "update-dependencies"
+ $result.changed = $true
+ }
+}
+
+Function Nssm-Update-StartMode
+{
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [string]$name,
+ [Parameter(Mandatory=$true)]
+ [string]$mode
+ )
+
+ $cmd = "get ""$name"" Start"
+ $results = Nssm-Invoke $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating start mode for service ""$name"""
+ }
+
+ $modes=@{"auto" = "SERVICE_AUTO_START"; "manual" = "SERVICE_DEMAND_START"; "disabled" = "SERVICE_DISABLED"}
+ $mappedMode = $modes.$mode
+ if ($results -cnotlike $mappedMode) {
+ $cmd = "set ""$name"" Start $mappedMode"
+ $results = Nssm-Invoke $cmd
+
+ if ($LastExitCode -ne 0)
+ {
+ Set-Attr $result "nssm_error_cmd" $cmd
+ Set-Attr $result "nssm_error_log" "$results"
+ Throw "Error updating start mode for service ""$name"""
+ }
+
+ Set-Attr $result "changed_by" "start_mode"
+ $result.changed = $true
+ }
}
Function Nssm-Get-Status
@@ -373,8 +516,8 @@ Function Nssm-Get-Status
[string]$name
)
- $cmd = "nssm status ""$name"""
- $results = invoke-expression $cmd
+ $cmd = "status ""$name"""
+ $results = Nssm-Invoke $cmd
return ,$results
}
@@ -417,9 +560,9 @@ Function Nssm-Start-Service-Command
[string]$name
)
- $cmd = "nssm start ""$name"""
+ $cmd = "start ""$name"""
- $results = invoke-expression $cmd
+ $results = Nssm-Invoke $cmd
if ($LastExitCode -ne 0)
{
@@ -428,6 +571,7 @@ Function Nssm-Start-Service-Command
Throw "Error starting service ""$name"""
}
+ Set-Attr $result "changed_by" "start_service"
$result.changed = $true
}
@@ -439,9 +583,9 @@ Function Nssm-Stop-Service-Command
[string]$name
)
- $cmd = "nssm stop ""$name"""
+ $cmd = "stop ""$name"""
- $results = invoke-expression $cmd
+ $results = Nssm-Invoke $cmd
if ($LastExitCode -ne 0)
{
@@ -450,6 +594,7 @@ Function Nssm-Stop-Service-Command
Throw "Error stopping service ""$name"""
}
+ Set-Attr $result "changed_by" "stop_service_command"
$result.changed = $true
}
@@ -470,11 +615,11 @@ Function Nssm-Stop
Throw "Error stopping service ""$name"""
}
- if (currentStatus -ne "SERVICE_STOPPED")
+ if ($currentStatus -ne "SERVICE_STOPPED")
{
- $cmd = "nssm stop ""$name"""
+ $cmd = "stop ""$name"""
- $results = invoke-expression $cmd
+ $results = Nssm-Invoke $cmd
if ($LastExitCode -ne 0)
{
@@ -483,6 +628,7 @@ Function Nssm-Stop
Throw "Error stopping service ""$name"""
}
+ Set-Attr $result "changed_by" "stop_service"
$result.changed = $true
}
}
@@ -499,32 +645,34 @@ Function Nssm-Restart
Nssm-Start-Service-Command -name $name
}
+Function NssmProcedure
+{
+ Nssm-Install -name $name -application $application
+ Nssm-Update-AppParameters -name $name -appParameters $appParameters
+ Nssm-Set-Output-Files -name $name -stdout $stdoutFile -stderr $stderrFile
+ Nssm-Update-Dependencies -name $name -dependencies $dependencies
+ Nssm-Update-Credentials -name $name -user $user -password $password
+ Nssm-Update-StartMode -name $name -mode $startMode
+}
+
Try
{
switch ($state)
{
"absent" { Nssm-Remove -name $name }
"present" {
- Nssm-Install -name $name -application $application
- Nssm-Update-AppParameters -name $name -appParameters $appParameters
- Nssm-Set-Ouput-Files -name $name -stdout $stdoutFile -stderr $stderrFile
+ NssmProcedure
}
"started" {
- Nssm-Install -name $name -application $application
- Nssm-Update-AppParameters -name $name -appParameters $appParameters
- Nssm-Set-Ouput-Files -name $name -stdout $stdoutFile -stderr $stderrFile
+ NssmProcedure
Nssm-Start -name $name
}
"stopped" {
- Nssm-Install -name $name -application $application
- Nssm-Update-AppParameters -name $name -appParameters $appParameters
- Nssm-Set-Ouput-Files -name $name -stdout $stdoutFile -stderr $stderrFile
+ NssmProcedure
Nssm-Stop -name $name
}
"restarted" {
- Nssm-Install -name $name -application $application
- Nssm-Update-AppParameters -name $name -appParameters $appParameters
- Nssm-Set-Ouput-Files -name $name -stdout $stdoutFile -stderr $stderrFile
+ NssmProcedure
Nssm-Restart -name $name
}
}
@@ -535,4 +683,3 @@ Catch
{
Fail-Json $result $_.Exception.Message
}
-
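The encoding swap inside Nssm-Invoke is the core of this refactor: nssm.exe writes UTF-16 ("Unicode") to stdout, and a console that decodes it as UTF-8 garbles the captured output. A minimal standalone sketch of the same pattern, assuming nssm.exe is on PATH ("MyService" is a hypothetical service name):

```powershell
$encodingWas = [System.Console]::OutputEncoding
Try {
    # Read nssm's UTF-16 stdout with the matching encoding
    [System.Console]::OutputEncoding = [System.Text.Encoding]::Unicode
    & nssm status "MyService"   # "MyService" is a placeholder
}
Finally {
    # Always restore the previous console encoding
    [System.Console]::OutputEncoding = $encodingWas
}
```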
diff --git a/windows/win_nssm.py b/windows/win_nssm.py
index d2a82c12617..57d9dfa3cb5 100644
--- a/windows/win_nssm.py
+++ b/windows/win_nssm.py
@@ -21,6 +21,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_nssm
@@ -29,7 +33,7 @@
description:
- nssm is a service helper which doesn't suck. See https://nssm.cc/ for more information.
requirements:
- - "nssm >= 2.24.0 # (install via win_chocolatey) win_chocolatey: name=nssm"
+ - "nssm >= 2.24.0 # (install via win_chocolatey) win_chocolatey: name=nssm"
options:
name:
description:
@@ -52,7 +56,7 @@
- The application binary to run as a service
- "Specify this whenever the service may need to be installed (state: present, started, stopped, restarted)"
- "Note that the application name must look like the following, if the directory includes spaces:"
- - "nssm install service \\"c:\\Program Files\\app.exe\\" \\"\\"\\"C:\\Path with spaces\\"\\"\\""
+ - 'nssm install service "c:\\Program Files\\app.exe\\" "C:\\Path with spaces\\"'
- "See commit 0b386fc1984ab74ee59b7bed14b7e8f57212c22b in the nssm.git project for more info (https://git.nssm.cc/?p=nssm.git;a=commit;h=0b386fc1984ab74ee59b7bed14b7e8f57212c22b)"
required: false
default: null
@@ -71,59 +75,104 @@
- Parameters to be passed to the application when it starts
required: false
default: null
+ dependencies:
+ description:
+ - Service dependencies that have to be started to trigger startup, separated by comma.
+ required: false
+ default: null
+ user:
+ description:
+ - User to be used for service startup
+ required: false
+ default: null
+ password:
+ description:
+ - Password to be used for service startup
+ required: false
+ default: null
+ start_mode:
+ description:
+ - If C(auto) is selected, the service will start at bootup. C(manual) means that the service will start only when another service needs it. C(disabled) means that the service will stay off, regardless of whether it is needed.
+ required: false
+ default: auto
+ choices:
+ - auto
+ - manual
+ - disabled
author:
- "Adam Keech (@smadam813)"
- "George Frank (@georgefrank)"
+ - "Hans-Joachim Kliemeck (@h0nIg)"
'''
EXAMPLES = '''
- # Install and start the foo service
- win_nssm:
+# Install and start the foo service
+- win_nssm:
name: foo
- application: C:\windows\foo.exe
+ application: C:\windows\\foo.exe
- # Install and start the foo service with a key-value pair argument
- # This will yield the following command: C:\windows\foo.exe bar "true"
- win_nssm:
+# Install and start the foo service with a key-value pair argument
+# This will yield the following command: C:\windows\\foo.exe bar "true"
+- win_nssm:
name: foo
- application: C:\windows\foo.exe
+ application: C:\windows\\foo.exe
app_parameters:
bar: true
- # Install and start the foo service with a key-value pair argument, where the argument needs to start with a dash
- # This will yield the following command: C:\windows\foo.exe -bar "true"
- win_nssm:
+# Install and start the foo service with a key-value pair argument, where the argument needs to start with a dash
+# This will yield the following command: C:\windows\\foo.exe -bar "true"
+- win_nssm:
name: foo
- application: C:\windows\foo.exe
+ application: C:\windows\\foo.exe
app_parameters:
"-bar": true
- # Install and start the foo service with a single parameter
- # This will yield the following command: C:\windows\foo.exe bar
- win_nssm:
+# Install and start the foo service with a single parameter
+# This will yield the following command: C:\windows\\foo.exe bar
+- win_nssm:
name: foo
- application: C:\windows\foo.exe
+ application: C:\windows\\foo.exe
app_parameters:
_: bar
- # Install and start the foo service with a mix of single params, and key value pairs
- # This will yield the following command: C:\windows\foo.exe bar -file output.bat
- win_nssm:
+# Install and start the foo service with a mix of single params, and key value pairs
+# This will yield the following command: C:\windows\\foo.exe bar -file output.bat
+- win_nssm:
name: foo
- application: C:\windows\foo.exe
+ application: C:\windows\\foo.exe
app_parameters:
_: bar
"-file": "output.bat"
- # Install and start the foo service, redirecting stdout and stderr to the same file
- win_nssm:
+# Install and start the foo service, redirecting stdout and stderr to the same file
+- win_nssm:
+ name: foo
+ application: C:\windows\\foo.exe
+ stdout_file: C:\windows\\foo.log
+ stderr_file: C:\windows\\foo.log
+
+# Install and start the foo service, but wait for dependencies tcpip and adf
+- win_nssm:
+ name: foo
+ application: C:\windows\\foo.exe
+ dependencies: 'adf,tcpip'
+
+# Install and start the foo service with dedicated user
+- win_nssm:
+ name: foo
+ application: C:\windows\\foo.exe
+ user: foouser
+ password: secret
+
+# Install the foo service but do not start it automatically
+- win_nssm:
name: foo
- application: C:\windows\foo.exe
- stdout_file: C:\windows\foo.log
- stderr_file: C:\windows\foo.log
+ application: C:\windows\\foo.exe
+ state: present
+ start_mode: manual
- # Remove the foo service
- win_nssm:
+# Remove the foo service
+- win_nssm:
name: foo
state: absent
'''
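The `_` and key-value conventions in the examples above map onto the flattening done by ParseAppParameters/Nssm-Update-AppParameters in the .ps1. A rough sketch of that logic, not the module code itself (the input string format is simplified here):

```powershell
# Flatten "key=value" lines into one nssm argument string.
$appParameters = "_=bar`n-file=output.bat"
$hash = ConvertFrom-StringData -StringData $appParameters
$singleLine = ""
foreach ($entry in $hash.GetEnumerator()) {
    if ($entry.Name -eq "_") {
        # the special "_" key contributes a bare leading value
        $singleLine = "$($entry.Value) " + $singleLine
    } else {
        $singleLine += "$($entry.Name) ""$($entry.Value)"""
    }
}
$singleLine   # -> bar -file "output.bat" (hashtable order is not guaranteed)
```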
diff --git a/windows/win_owner.ps1 b/windows/win_owner.ps1
new file mode 100644
index 00000000000..076ab846052
--- /dev/null
+++ b/windows/win_owner.ps1
@@ -0,0 +1,136 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Hans-Joachim Kliemeck
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+#Functions
+Function UserSearch
+{
+ Param ([string]$accountName)
+ #Check if there's a realm specified
+
+ $searchDomain = $false
+ $searchDomainUPN = $false
+ if ($accountName.Split("\").count -gt 1)
+ {
+ if ($accountName.Split("\")[0] -ne $env:COMPUTERNAME)
+ {
+ $searchDomain = $true
+ $accountName = $accountName.split("\")[1]
+ }
+ }
+ Elseif ($accountName.contains("@"))
+ {
+ $searchDomain = $true
+ $searchDomainUPN = $true
+ }
+ Else
+ {
+ #Default to local user account
+ $accountName = $env:COMPUTERNAME + "\" + $accountName
+ }
+
+ if ($searchDomain -eq $false)
+ {
+ # do not use Win32_UserAccount, because e.g. SYSTEM (BUILTIN\SYSTEM or COMPUTERNAME\SYSTEM) would not be listed; Win32_Account also lists groups
+ $localaccount = get-wmiobject -class "Win32_Account" -namespace "root\CIMV2" -filter "(LocalAccount = True)" | where {$_.Caption -eq $accountName}
+ if ($localaccount)
+ {
+ return $localaccount.SID
+ }
+ }
+ Else
+ {
+ #Search by samaccountname
+ $Searcher = [adsisearcher]""
+
+ If ($searchDomainUPN -eq $false) {
+ $Searcher.Filter = "sAMAccountName=$($accountName)"
+ }
+ Else {
+ $Searcher.Filter = "userPrincipalName=$($accountName)"
+ }
+
+ $result = $Searcher.FindOne()
+ if ($result)
+ {
+ $user = $result.GetDirectoryEntry()
+
+ # get binary SID from AD account
+ $binarySID = $user.ObjectSid.Value
+
+ # convert to string SID
+ return (New-Object System.Security.Principal.SecurityIdentifier($binarySID,0)).Value
+ }
+ }
+}
+
+$params = Parse-Args $args;
+
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+$path = Get-Attr $params "path" -failifempty $true
+$user = Get-Attr $params "user" -failifempty $true
+$recurse = Get-Attr $params "recurse" "no" -validateSet "no","yes" -resultobj $result
+$recurse = $recurse | ConvertTo-Bool
+
+If (-Not (Test-Path -Path $path)) {
+ Fail-Json $result "$path file or directory does not exist on the host"
+}
+
+# Test that the user/group is resolvable on the local machine
+$sid = UserSearch -AccountName ($user)
+if (!$sid)
+{
+ Fail-Json $result "$user is not a valid user or group on the host machine or domain"
+}
+
+Try {
+ $objUser = New-Object System.Security.Principal.SecurityIdentifier($sid)
+
+ $file = Get-Item -Path $path
+ $acl = Get-Acl $file.FullName
+
+ If ($acl.getOwner([System.Security.Principal.SecurityIdentifier]) -ne $objUser) {
+ $acl.setOwner($objUser)
+ Set-Acl $file.FullName $acl
+
+ Set-Attr $result "changed" $true;
+ }
+
+ If ($recurse) {
+ $files = Get-ChildItem -Path $path -Force -Recurse
+ ForEach($file in $files){
+ $acl = Get-Acl $file.FullName
+
+ If ($acl.getOwner([System.Security.Principal.SecurityIdentifier]) -ne $objUser) {
+ $acl.setOwner($objUser)
+ Set-Acl $file.FullName $acl
+
+ Set-Attr $result "changed" $true;
+ }
+ }
+ }
+}
+Catch {
+ Fail-Json $result "an error occured when attempting to change owner on $path for $user"
+}
+
+Exit-Json $result
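UserSearch resolves an account name to a SID through WMI for local accounts and [adsisearcher] for domain ones. For a quick local sanity check outside the module, the NTAccount translation below does something similar (the account name is illustrative):

```powershell
# Translate a local account name to its SID (read-only).
$account = New-Object System.Security.Principal.NTAccount($env:COMPUTERNAME, "Administrator")
$account.Translate([System.Security.Principal.SecurityIdentifier]).Value
```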
diff --git a/windows/win_owner.py b/windows/win_owner.py
new file mode 100644
index 00000000000..b3ad35b40a6
--- /dev/null
+++ b/windows/win_owner.py
@@ -0,0 +1,73 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2015, Hans-Joachim Kliemeck
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: win_owner
+version_added: "2.1"
+short_description: Set owner
+description:
+ - Set owner of files or directories
+options:
+ path:
+ description:
+ - Path to be used for changing owner
+ required: true
+ user:
+ description:
+ - Name to be used for changing owner
+ required: true
+ recurse:
+ description:
+ - Indicates if the owner should be changed recursively
+ required: false
+ choices:
+ - no
+ - yes
+ default: no
+author: Hans-Joachim Kliemeck (@h0nIg)
+'''
+
+EXAMPLES = '''
+# Playbook example
+---
+- name: Change owner of Path
+ win_owner:
+ path: 'C:\\apache\\'
+ user: apache
+ recurse: yes
+
+- name: Set the owner of root directory
+ win_owner:
+ path: 'C:\\apache\\'
+ user: SYSTEM
+ recurse: no
+'''
+
+RETURN = '''
+
+'''
\ No newline at end of file
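To verify on the target what win_owner actually set, the owner can be read back with Get-Acl; a one-liner using the path from the examples above:

```powershell
# Show the current owner of the directory (read-only).
(Get-Acl -Path 'C:\apache').Owner
```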
diff --git a/windows/win_package.ps1 b/windows/win_package.ps1
index 6cdc6bf6e5c..544c3660866 100644
--- a/windows/win_package.ps1
+++ b/windows/win_package.ps1
@@ -100,7 +100,15 @@ Function Throw-TerminatingError
[System.Management.Automation.ErrorRecord] $ErrorRecord
)
- $exception = new-object "System.InvalidOperationException" $Message,$ErrorRecord.Exception
+ if ($errorRecord)
+ {
+ $exception = new-object "System.InvalidOperationException" $Message,$ErrorRecord.Exception
+ }
+ Else
+ {
+ $exception = new-object "System.InvalidOperationException" $Message
+ }
+
$errorRecord = New-Object System.Management.Automation.ErrorRecord $exception,"MachineStateIncorrect","InvalidOperation",$null
throw $errorRecord
}
@@ -186,7 +194,19 @@ Function Validate-StandardArguments
try
{
Trace-Message "Parsing $ProductId as an identifyingNumber"
- $identifyingNumber = "{{{0}}}" -f [Guid]::Parse($ProductId).ToString().ToUpper()
+ $TestGuid = [system.guid]::NewGuid()
+ #Check to see if the productid is a guid
+ if ([guid]::TryParse($ProductId, [ref]$TestGuid))
+ {
+ $identifyingNumber = "{{{0}}}" -f [Guid]::Parse($ProductId).ToString().ToUpper()
+ Trace-Message "Parsed $ProductId as $identifyingNumber (is guid)"
+ }
+ Else
+ {
+ $identifyingNumber = $ProductId
+ Trace-Message "Parsed $ProductId as $identifyingNumber (is not guid)"
+ }
+
Trace-Message "Parsed $ProductId as $identifyingNumber"
}
catch
@@ -752,7 +772,7 @@ function Set-TargetResource
if($Ensure -eq "Present")
{
# check if Msi package contains the ProductName and Code specified
-
+ <#
$pName,$pCode = Get-MsiProductEntry -Path $Path
if (
@@ -762,6 +782,7 @@ function Set-TargetResource
{
Throw-InvalidNameOrIdException ($LocalizedData.InvalidNameOrId -f $Name,$identifyingNumber,$pName,$pCode)
}
+ #>
$startInfo.Arguments = '/i "{0}"' -f $Path
}
@@ -1287,24 +1308,19 @@ Else
}
catch
{
- $errormsg = $_[0].exception
+ $errormsg = $_
+ Fail-Json -obj $result -message $errormsg.ToString()
}
- if ($errormsg)
+ #Check if DSC thinks the computer needs a reboot:
+ if ((get-variable DSCMachinestatus -Scope Global -ea 0) -and ($global:DSCMachineStatus -eq 1))
{
- Fail-Json -obj $result -message $errormsg.ToString()
+ Set-Attr $result "restart_required" $true
}
- Else
- {
- #Check if DSC thinks the computer needs a reboot:
- if ($global:DSCMachineStatus -eq 1)
- {
- Set-Attr $result "restart_required" $true
- }
- #Set-TargetResource did its job. We can assume a change has happened
- Set-Attr $result "changed" $true
- Exit-Json -obj $result
- }
+ #Set-TargetResource did its job. We can assume a change has happened
+ Set-Attr $result "changed" $true
+ Exit-Json -obj $result
+
}
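The new TryParse branch means product_id no longer has to be a GUID: GUID values are normalised to the braced upper-case form, anything else passes through untouched. A condensed sketch of that logic (the sample id comes from the docs example):

```powershell
$ProductId = "{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
$testGuid = [guid]::NewGuid()
if ([guid]::TryParse($ProductId, [ref]$testGuid)) {
    # Normalise GUID product ids to {UPPERCASE} with braces
    $identifyingNumber = "{{{0}}}" -f [guid]::Parse($ProductId).ToString().ToUpper()
} else {
    # Non-GUID ids (e.g. an uninstall key name) are used as-is
    $identifyingNumber = $ProductId
}
$identifyingNumber
```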
diff --git a/windows/win_package.py b/windows/win_package.py
index 68497d5ba4f..9c358fcd845 100644
--- a/windows/win_package.py
+++ b/windows/win_package.py
@@ -21,38 +21,40 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_package
version_added: "1.7"
author: Trond Hindenes
-short_description: Installs/Uninstalls a installable package, either from local file system or url
+short_description: Installs/Uninstalls an installable package, either from local file system or url
description:
- - Installs or uninstalls a package
+ - Installs or uninstalls a package.
+ - 'Optionally uses a product_id to check if the package needs installing. You can find product ids for installed programs in the Windows registry either in C(HKLM:Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall) or for 32 bit programs C(HKLM:Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall)'
options:
path:
description:
- Location of the package to be installed (either on file system, network share or url)
required: true
- default: null
- aliases: []
name:
description:
- - name of the package. Just for logging reasons, will use the value of path if name isn't specified
+ - Name of the package. If name isn't specified, the path will be used for log messages
required: false
default: null
- aliases: []
product_id:
description:
- product id of the installed package (used for checking if already installed)
+ - You can find product ids for installed programs in the Windows registry either in C(HKLM:Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall) or for 32 bit programs C(HKLM:Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall)
required: true
- default: null
aliases: [productid]
arguments:
description:
- Any arguments the installer needs
default: null
- aliases: []
+ required: false
state:
description:
- Install or Uninstall
@@ -60,29 +62,38 @@
- present
- absent
default: present
+ required: false
aliases: [ensure]
user_name:
description:
- Username of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly.
default: null
- aliases: []
+ required: false
user_password:
description:
- Password of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.
default: null
- aliases: []
-author: Trond Hindenes
+ required: false
'''
EXAMPLES = '''
# Playbook example
- - name: Install the vc thingy
- win_package:
- name="Microsoft Visual C thingy"
- path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
- Product_Id="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
- Arguments="/install /passive /norestart"
-
+- name: Install the vc thingy
+ win_package:
+ name="Microsoft Visual C thingy"
+ path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
+ Product_Id="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
+ Arguments="/install /passive /norestart"
+# Install/uninstall an msi-based package
+- name: Install msi-based package (Remote Desktop Connection Manager)
+ win_package:
+ path: "https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi"
+ product_id: "{0240359E-6A4C-4884-9E94-B397A02D893C}"
+- name: Uninstall msi-based package
+ win_package:
+ path: "https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi"
+ product_id: "{0240359E-6A4C-4884-9E94-B397A02D893C}"
+ state: absent
'''
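Since the documentation now points at the Uninstall keys for discovering product ids, a read-only registry query such as the following lists the candidates (a sketch; assumes a 64-bit host so both views exist):

```powershell
# Enumerate product ids from the native and 32-bit uninstall views.
Get-ChildItem 'HKLM:\Software\Microsoft\Windows\CurrentVersion\Uninstall',
              'HKLM:\Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall' |
    Select-Object -ExpandProperty PSChildName
```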
diff --git a/windows/win_regedit.ps1 b/windows/win_regedit.ps1
index 1a257413466..723a6c7b239 100644
--- a/windows/win_regedit.ps1
+++ b/windows/win_regedit.ps1
@@ -21,64 +21,34 @@ $ErrorActionPreference = "Stop"
# WANT_JSON
# POWERSHELL_COMMON
+New-PSDrive -PSProvider registry -Root HKEY_CLASSES_ROOT -Name HKCR -ErrorAction SilentlyContinue
+New-PSDrive -PSProvider registry -Root HKEY_USERS -Name HKU -ErrorAction SilentlyContinue
+New-PSDrive -PSProvider registry -Root HKEY_CURRENT_CONFIG -Name HCCC -ErrorAction SilentlyContinue
+
$params = Parse-Args $args;
$result = New-Object PSObject;
Set-Attr $result "changed" $false;
+Set-Attr $result "data_changed" $false;
+Set-Attr $result "data_type_changed" $false;
-If ($params.key)
-{
- $registryKey = $params.key
-}
-Else
-{
- Fail-Json $result "missing required argument: key"
-}
-
-If ($params.value)
-{
- $registryValue = $params.value
-}
-Else
-{
- $registryValue = $null
-}
-
-If ($params.state)
-{
- $state = $params.state.ToString().ToLower()
- If (($state -ne "present") -and ($state -ne "absent"))
- {
- Fail-Json $result "state is $state; must be present or absent"
- }
-}
-Else
-{
- $state = "present"
-}
+$registryKey = Get-Attr -obj $params -name "key" -failifempty $true
+$registryValue = Get-Attr -obj $params -name "value" -default $null
+$state = Get-Attr -obj $params -name "state" -validateSet "present","absent" -default "present"
+$registryData = Get-Attr -obj $params -name "data" -default $null
+$registryDataType = Get-Attr -obj $params -name "datatype" -validateSet "binary","dword","expandstring","multistring","string","qword" -default "string"
-If ($params.data)
-{
- $registryData = $params.data
-}
-ElseIf ($state -eq "present" -and $registryValue -ne $null)
+If ($state -eq "present" -and $registryData -eq $null -and $registryValue -ne $null)
{
Fail-Json $result "missing required argument: data"
}
-If ($params.datatype)
+# check the registry key is in powershell ps-drive format: HKLM, HKCU, HKU, HKCR, HCCC
+If (-not ($registryKey -match "^H[KC][CLU][MURC]{0,1}:\\"))
{
- $registryDataType = $params.datatype.ToString().ToLower()
- $validRegistryDataTypes = "binary", "dword", "expandstring", "multistring", "string", "qword"
- If ($validRegistryDataTypes -notcontains $registryDataType)
- {
- Fail-Json $result "type is $registryDataType; must be binary, dword, expandstring, multistring, string, or qword"
- }
-}
-Else
-{
- $registryDataType = "string"
+ Fail-Json $result "key: $registryKey is not a valid powershell path, see module documentation for examples."
}
+
Function Test-RegistryValueData {
Param (
[parameter(Mandatory=$true)]
@@ -95,19 +65,95 @@ Function Test-RegistryValueData {
}
}
+# Returns true if registry data matches.
+# Handles binary, integer(dword) and string registry data
+Function Compare-RegistryData {
+ Param (
+ [parameter(Mandatory=$true)]
+ [AllowEmptyString()]$ReferenceData,
+ [parameter(Mandatory=$true)]
+ [AllowEmptyString()]$DifferenceData
+ )
+
+ if ($ReferenceData -is [String] -or $ReferenceData -is [int]) {
+ if ($ReferenceData -eq $DifferenceData) {
+ return $true
+ } else {
+ return $false
+ }
+ } elseif ($ReferenceData -is [Object[]]) {
+ if (@(Compare-Object $ReferenceData $DifferenceData -SyncWindow 0).Length -eq 0) {
+ return $true
+ } else {
+ return $false
+ }
+ }
+}
+
+# Simplified version of Convert-HexStringToByteArray from
+# https://cyber-defense.sans.org/blog/2010/02/11/powershell-byte-array-hex-convert
+# Expects a hex in the format you get when you run reg.exe export,
+# and converts to a byte array so powershell can modify binary registry entries
+function Convert-RegExportHexStringToByteArray
+{
+ Param (
+ [parameter(Mandatory=$true)] [String] $String
+ )
+
+    # remove 'hex:' from the front of the string if present
+    $String = $String.ToLower() -replace '^hex\:', ''
+
+    # remove whitespace and any other non-hex crud.
+    $String = $String.ToLower() -replace '[^a-f0-9\\,x\-\:]',''
+
+    # turn commas into colons
+    $String = $String -replace ',',':'
+
+    # maybe there's nothing left over to convert...
+    if ($String.Length -eq 0) { ,@() ; return }
+
+    # split string with or without colon delimiters.
+    if ($String.Length -eq 1)
+    { ,@([System.Convert]::ToByte($String,16)) }
+    elseif (($String.Length % 2 -eq 0) -and ($String.IndexOf(":") -eq -1))
+    { ,@($String -split '([a-f0-9]{2})' | foreach-object { if ($_) {[System.Convert]::ToByte($_,16)}}) }
+    elseif ($String.IndexOf(":") -ne -1)
+    { ,@($String -split ':+' | foreach-object {[System.Convert]::ToByte($_,16)}) }
+    else
+    { ,@() }
+
+}
+
+if($registryDataType -eq "binary" -and $registryData -ne $null -and $registryData -is [String]) {
+ $registryData = Convert-RegExportHexStringToByteArray($registryData)
+}
+
if($state -eq "present") {
if ((Test-Path $registryKey) -and $registryValue -ne $null)
{
if (Test-RegistryValueData -Path $registryKey -Value $registryValue)
{
+ # handle binary data
+ $currentRegistryData =(Get-ItemProperty -Path $registryKey | Select-Object -ExpandProperty $registryValue)
+
+ if ($registryValue.ToLower() -eq "(default)") {
+ # Special case handling for the key's default property. Because .GetValueKind() doesn't work for the (default) key property
+ $oldRegistryDataType = "String"
+ }
+ else {
+ $oldRegistryDataType = (Get-Item $registryKey).GetValueKind($registryValue)
+ }
+
# Changes Data and DataType
- if ((Get-Item $registryKey).GetValueKind($registryValue) -ne $registryDataType)
+ if ($registryDataType -ne $oldRegistryDataType)
{
Try
{
Remove-ItemProperty -Path $registryKey -Name $registryValue
New-ItemProperty -Path $registryKey -Name $registryValue -Value $registryData -PropertyType $registryDataType
$result.changed = $true
+ $result.data_changed = $true
+ $result.data_type_changed = $true
}
Catch
{
@@ -115,11 +161,12 @@ if($state -eq "present") {
}
}
# Changes Only Data
- elseif ((Get-ItemProperty -Path $registryKey | Select-Object -ExpandProperty $registryValue) -ne $registryData)
+ elseif (-Not (Compare-RegistryData -ReferenceData $currentRegistryData -DifferenceData $registryData))
{
Try {
Set-ItemProperty -Path $registryKey -Name $registryValue -Value $registryData
$result.changed = $true
+ $result.data_changed = $true
}
Catch
{
@@ -142,7 +189,7 @@ if($state -eq "present") {
}
elseif(-not (Test-Path $registryKey))
{
- Try
+ Try
{
$newRegistryKey = New-Item $registryKey -Force
$result.changed = $true
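Convert-RegExportHexStringToByteArray above accepts reg.exe-style exports; a trimmed-down sketch of the round trip from "hex:be,ef" text to a byte array suitable for a REG_BINARY write (simplified to the comma-separated case):

```powershell
$raw = 'hex:be,ef,be,ef'
# Strip the optional prefix and anything that is not hex or a separator
$clean = $raw.ToLower() -replace '^hex\:','' -replace '[^a-f0-9,]',''
$bytes = $clean -split ',' | ForEach-Object { [System.Convert]::ToByte($_, 16) }
$bytes   # -> 190 239 190 239
```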
diff --git a/windows/win_regedit.py b/windows/win_regedit.py
index 5087a5eaa8f..693b4c2f370 100644
--- a/windows/win_regedit.py
+++ b/windows/win_regedit.py
@@ -21,6 +21,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_regedit
@@ -43,7 +47,7 @@
aliases: []
data:
description:
- - Registry Value Data
+ - Registry Value Data. Binary data should be expressed a yaml byte array or as comma separated hex values. An easy way to generate this is to run C(regedit.exe) and use the I(Export) option to save the registry values to a file. In the exported file binary values will look like C(hex:be,ef,be,ef). The C(hex:) prefix is optional.
required: false
default: null
aliases: []
@@ -94,6 +98,26 @@
data: 1337
datatype: dword
+ # Creates Registry Key called MyCompany,
+ # a value within MyCompany Key called "hello", and
+ # binary data for the value "hello" as type "binary"
+ # data expressed as comma separated list
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+ value: hello
+ data: hex:be,ef,be,ef,be,ef,be,ef,be,ef
+ datatype: binary
+
+ # Creates Registry Key called MyCompany,
+ # a value within MyCompany Key called "hello", and
+ # binary data for the value "hello" as type "binary"
+ # data expressed as yaml array of bytes
+ win_regedit:
+ key: HKCU:\Software\MyCompany
+ value: hello
+ data: [0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef]
+ datatype: binary
+
# Delete Registry Key MyCompany
# NOTE: Not specifying a value will delete the root key which means
# all values will be deleted
@@ -106,4 +130,22 @@
key: HKCU:\Software\MyCompany
value: hello
state: absent
+
+ # Ensure registry paths containing spaces are quoted.
+ # Creates Registry Key called 'My Company'.
+ win_regedit:
+ key: 'HKCU:\Software\My Company'
+
+'''
+RETURN = '''
+data_changed:
+ description: whether this invocation changed the data in the registry value
+ returned: success
+ type: boolean
+ sample: False
+data_type_changed:
+ description: whether this invocation changed the datatype of the registry value
+ returned: success
+ type: boolean
+ sample: True
'''
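To check what the binary examples above actually wrote, the value can be read back as a byte array (key and value names taken from those examples):

```powershell
# REG_BINARY data comes back as a byte array.
(Get-ItemProperty -Path 'HKCU:\Software\MyCompany' -Name hello).hello
```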
diff --git a/windows/win_regmerge.ps1 b/windows/win_regmerge.ps1
new file mode 100644
index 00000000000..87e73a69773
--- /dev/null
+++ b/windows/win_regmerge.ps1
@@ -0,0 +1,100 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Jon Hawkesworth (@jhawkesworth)
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+Function Convert-RegistryPath {
+ Param (
+ [parameter(Mandatory=$True)]
+ [ValidateNotNullOrEmpty()]$Path
+ )
+
+ $output = $Path -replace "HKLM:", "HKLM"
+ $output = $output -replace "HKCU:", "HKCU"
+
+ Return $output
+}
+
+$params = Parse-Args $args
+$result = New-Object PSObject
+Set-Attr $result "changed" $False
+
+$path = Get-Attr -obj $params -name path -failifempty $True -resultobj $result
+$compare_to = Get-Attr -obj $params -name compare_to -failifempty $False -resultobj $result
+
+# check it looks like a reg key, warn if key not present - will happen first time
+# only accepting PS-Drive style key names (starting with HKLM etc, not HKEY_LOCAL_MACHINE etc)
+
+$do_comparison = $False
+
+If ($compare_to) {
+ $compare_to_key = $params.compare_to.ToString()
+ If (Test-Path $compare_to_key -pathType container ) {
+ $do_comparison = $True
+ } Else {
+ Set-Attr $result "compare_to_key_found" $False
+ }
+}
+
+If ( $do_comparison -eq $True ) {
+ $guid = [guid]::NewGuid()
+ $exported_path = $env:TEMP + "\" + $guid.ToString() + 'ansible_win_regmerge.reg'
+
+ $expanded_compare_key = Convert-RegistryPath ($compare_to_key)
+
+ # export from the reg key location to a file
+ $reg_args = @("EXPORT", "$expanded_compare_key", $exported_path)
+ & reg.exe $reg_args
+
+ # compare the two files
+ $comparison_result = Compare-Object -ReferenceObject $(Get-Content $path) -DifferenceObject $(Get-Content $exported_path)
+
+ If (Get-Member -InputObject $comparison_result -Name "count" -MemberType Properties )
+ {
+ # Something is different, actually do reg merge
+ $reg_import_args = @("IMPORT", "$path")
+ $ret = & reg.exe $reg_import_args 2>&1
+ If ($LASTEXITCODE -eq 0) {
+ Set-Attr $result "changed" $True
+ Set-Attr $result "difference_count" $comparison_result.count
+ } Else {
+ Set-Attr $result "rc" $LASTEXITCODE
+ Fail-Json $result "$ret"
+ }
+ } Else {
+ Set-Attr $result "difference_count" 0
+ }
+
+ Remove-Item $exported_path
+ Set-Attr $result "compared" $True
+
+} Else {
+ # not comparing, merge and report changed
+ $reg_import_args = @("IMPORT", "$path")
+ $ret = & reg.exe $reg_import_args 2>&1
+ If ( $LASTEXITCODE -eq 0 ) {
+ Set-Attr $result "changed" $True
+ Set-Attr $result "compared" $False
+ } Else {
+ Set-Attr $result "rc" $LASTEXITCODE
+ Fail-Json $result "$ret"
+ }
+}
+
+Exit-Json $result
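The compare-then-import flow above boils down to exporting the live key and diffing it line-by-line against the candidate file. A condensed sketch under the same assumptions (the key exists and the .reg file is a valid export):

```powershell
$regFile  = 'C:\autodeploy\myCompany-settings.reg'
$exported = Join-Path $env:TEMP 'regmerge_sketch.reg'
# Export the current key, then diff against the candidate file
& reg.exe EXPORT 'HKLM\SOFTWARE\myCompany' $exported /y
$diff = Compare-Object (Get-Content $regFile) (Get-Content $exported)
if ($diff) { & reg.exe IMPORT $regFile }
Remove-Item $exported
```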
diff --git a/windows/win_regmerge.py b/windows/win_regmerge.py
new file mode 100644
index 00000000000..cefc98029a4
--- /dev/null
+++ b/windows/win_regmerge.py
@@ -0,0 +1,91 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Jon Hawkesworth (@jhawkesworth)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: win_regmerge
+version_added: "2.1"
+short_description: Merges the contents of a registry file into the windows registry
+description:
+ - Wraps the reg.exe command to import the contents of a registry file.
+ - Suitable for use with registry files created using M(win_template).
+ - Windows registry files have a specific format and must be constructed correctly with carriage return and line feed line endings, otherwise they will not be merged.
+ - Exported registry files often start with a Byte Order Mark which must be removed if the file is to be templated using M(win_template).
+ - Registry file format is described at U(https://support.microsoft.com/en-us/kb/310516)
+ - See also M(win_template), M(win_regedit)
+options:
+ path:
+ description:
+ - The full path including file name to the registry file on the remote machine to be merged
+ required: true
+ default: no default
+ compare_to:
+ description:
+ - The parent key to use when comparing the contents of the registry to the contents of the file. Needs to be in the HKLM or HKCU part of the registry. Use a PS-Drive style path, for example HKLM:\SOFTWARE not HKEY_LOCAL_MACHINE\SOFTWARE.
+ If not supplied, or the registry key is not found, no comparison will be made, and the module will report changed.
+ required: false
+ default: no default
+author: "Jon Hawkesworth (@jhawkesworth)"
+notes:
+ - Organise your registry files so that they contain a single root registry
+ key if you want to use the compare_to functionality.
+ This module does not force registry settings to be in the state
+ described in the file. If registry settings have been modified externally
+ the module will merge the contents of the file but continue to report
+ differences on subsequent runs.
+ To force registry change, use M(win_regedit) with state=absent before
+ using M(win_regmerge).
+'''
+
+EXAMPLES = '''
+ # Merge in a registry file without comparing to current registry
+ # Note that paths using / to separate are preferred as they require less special handling than \
+ win_regmerge:
+ path: C:/autodeploy/myCompany-settings.reg
+ # Compare and merge registry file
+ win_regmerge:
+ path: C:/autodeploy/myCompany-settings.reg
+ compare_to: HKLM:\SOFTWARE\myCompany
+'''
+
+RETURN = '''
+compare_to_key_found:
+ description: whether the parent registry key has been found for comparison
+ returned: when comparison key not found in registry
+ type: boolean
+ sample: false
+difference_count:
+ description: number of differences between the registry and the file
+ returned: changed
+ type: integer
+ sample: 1
+compared:
+ description: whether a comparison has taken place between the registry and the file
+ returned: when a comparison key has been supplied and comparison has been attempted
+ type: boolean
+ sample: true
+'''
diff --git a/windows/win_robocopy.ps1 b/windows/win_robocopy.ps1
new file mode 100644
index 00000000000..69cf9ee3e3a
--- /dev/null
+++ b/windows/win_robocopy.ps1
@@ -0,0 +1,147 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Corwin Brown
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+$result = New-Object psobject @{
+ win_robocopy = New-Object psobject @{
+ recurse = $false
+ purge = $false
+ }
+ changed = $false
+}
+
+$src = Get-AnsibleParam -obj $params -name "src" -failifempty $true
+$dest = Get-AnsibleParam -obj $params -name "dest" -failifempty $true
+$purge = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "purge" -default $false)
+$recurse = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "recurse" -default $false)
+$flags = Get-AnsibleParam -obj $params -name "flags" -default $null
+$_ansible_check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -default $false
+
+# Search for an Error Message
+# Robocopy seems to display an error after 3 '-----' separator lines
+Function SearchForError($cmd_output, $default_msg) {
+ $separator_count = 0
+ $error_msg = $default_msg
+ ForEach ($line in $cmd_output) {
+ if (-Not $line) {
+ continue
+ }
+
+ if ($separator_count -ne 3) {
+ if (Select-String -InputObject $line -pattern "^(\s+)?(\-+)(\s+)?$") {
+ $separator_count += 1
+ }
+ }
+ Else {
+ If (Select-String -InputObject $line -pattern "error") {
+ $error_msg = $line
+ break
+ }
+ }
+ }
+
+ return $error_msg
+}
+
+# Build Arguments
+$robocopy_opts = @()
+
+if (-Not (Test-Path $src)) {
+ Fail-Json $result "$src does not exist!"
+}
+
+$robocopy_opts += $src
+Set-Attr $result.win_robocopy "src" $src
+
+$robocopy_opts += $dest
+Set-Attr $result.win_robocopy "dest" $dest
+
+if ($flags -eq $null) {
+ if ($purge) {
+ $robocopy_opts += "/purge"
+ }
+
+ if ($recurse) {
+ $robocopy_opts += "/e"
+ }
+}
+Else {
+ $robocopy_opts += $flags
+}
+
+Set-Attr $result.win_robocopy "purge" $purge
+Set-Attr $result.win_robocopy "recurse" $recurse
+Set-Attr $result.win_robocopy "flags" $flags
+
+$robocopy_output = ""
+$rc = 0
+If ($_ansible_check_mode -eq $true) {
+ $robocopy_output = "Would have copied the contents of $src to $dest"
+ $rc = 0
+}
+Else {
+ Try {
+ &robocopy $robocopy_opts | Tee-Object -Variable robocopy_output | Out-Null
+ $rc = $LASTEXITCODE
+ }
+ Catch {
+ $ErrorMessage = $_.Exception.Message
+ Fail-Json $result "Error synchronizing $src to $dest! Msg: $ErrorMessage"
+ }
+}
+
+Set-Attr $result.win_robocopy "return_code" $rc
+Set-Attr $result.win_robocopy "output" $robocopy_output
+
+$cmd_msg = "Success"
+If ($rc -eq 0) {
+ $cmd_msg = "No files copied."
+}
+ElseIf ($rc -eq 1) {
+ $cmd_msg = "Files copied successfully!"
+ $changed = $true
+}
+ElseIf ($rc -eq 2) {
+ $cmd_msg = "Extra files or directories were detected!"
+ $changed = $true
+}
+ElseIf ($rc -eq 4) {
+ $cmd_msg = "Some mismatched files or directories were detected!"
+ $changed = $true
+}
+ElseIf ($rc -eq 8) {
+ $error_msg = SearchForError $robocopy_output "Some files or directories could not be copied!"
+ Fail-Json $result $error_msg
+}
+ElseIf ($rc -eq 10) {
+ $error_msg = SearchForError $robocopy_output "Serious Error! No files were copied! Do you have permissions to access $src and $dest?"
+ Fail-Json $result $error_msg
+}
+ElseIf ($rc -eq 16) {
+ $error_msg = SearchForError $robocopy_output "Fatal Error!"
+ Fail-Json $result $error_msg
+}
+
+Set-Attr $result.win_robocopy "msg" $cmd_msg
+Set-Attr $result.win_robocopy "changed" $changed
+
+Exit-Json $result
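The return-code ladder above works because robocopy's exit code is a bit field: 1 = files copied, 2 = extra entries, 4 = mismatches, 8 = copy failures, 16 = fatal error. An equivalent flag-based reading (illustrative only; assumes both directories exist):

```powershell
& robocopy 'C:\Src' 'C:\Dst' /e | Out-Null
$rc = $LASTEXITCODE
$changed = ($rc -band 0x07) -ne 0   # copied, extras, or mismatches
$failed  = ($rc -band 0x18) -ne 0   # copy failures or fatal error
"rc=$rc changed=$changed failed=$failed"
```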
diff --git a/windows/win_robocopy.py b/windows/win_robocopy.py
new file mode 100644
index 00000000000..c29c07604bb
--- /dev/null
+++ b/windows/win_robocopy.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Corwin Brown
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: win_robocopy
+version_added: "2.2"
+short_description: Synchronizes the contents of two directories using Robocopy.
+description:
+ - Synchronizes the contents of two directories on the remote machine. Under the hood this just calls out to RoboCopy, since that should be available on most modern Windows Systems.
+options:
+ src:
+ description:
+ - Source file/directory to sync.
+ required: true
+ dest:
+ description:
+ - Destination file/directory to sync (Will receive contents of src).
+ required: true
+ recurse:
+ description:
+ - Includes all subdirectories (Toggles the `/e` flag to RoboCopy). If "flags" is set, this will be ignored.
+ choices:
+ - true
+ - false
+ default: false
+ required: false
+ purge:
+ description:
+ - Deletes any files/directories found in the destination that do not exist in the source (Toggles the `/purge` flag to RoboCopy). If "flags" is set, this will be ignored.
+ choices:
+ - true
+ - false
+ default: false
+ required: false
+ flags:
+ description:
+ - Directly supply Robocopy flags. If set, purge and recurse will be ignored.
+ default: None
+ required: false
+author: Corwin Brown (@blakfeld)
+notes:
+ - This is not a complete port of the "synchronize" module. Unlike the "synchronize" module this only performs the sync/copy on the remote machine, not from the master to the remote machine.
+ - This module does not currently support all Robocopy flags.
+ - Works on Windows 7, Windows 8, Windows Server 2k8, and Windows Server 2k12
+"""
+
+EXAMPLES = """
+# Syncs the contents of one directory to another.
+$ ansible -i hosts all -m win_robocopy -a "src=C:\\DirectoryOne dest=C:\\DirectoryTwo"
+
+# Sync the contents of one directory to another, including subdirectories.
+$ ansible -i hosts all -m win_robocopy -a "src=C:\\DirectoryOne dest=C:\\DirectoryTwo recurse=true"
+
+# Sync the contents of one directory to another, and remove any files/directories found in destination that do not exist in the source.
+$ ansible -i hosts all -m win_robocopy -a "src=C:\\DirectoryOne dest=C:\\DirectoryTwo purge=true"
+
+# Sample sync
+---
+- name: Sync Two Directories
+ win_robocopy:
+ src: "C:\\DirectoryOne
+ dest: "C:\\DirectoryTwo"
+ recurse: true
+ purge: true
+
+---
+- name: Sync Two Directories
+ win_robocopy:
+ src: "C:\\DirectoryOne
+ dest: "C:\\DirectoryTwo"
+ recurse: true
+ purge: true
+ flags: '/XD SOME_DIR /XF SOME_FILE /MT:32'
+"""
+
+RETURN = '''
+src:
+ description: The Source file/directory of the sync.
+ returned: always
+ type: string
+ sample: "c:/Some/Path"
+dest:
+ description: The Destination file/directory of the sync.
+ returned: always
+ type: string
+ sample: "c:/Some/Path"
+recurse:
+ description: Whether or not the recurse flag was toggled.
+ returned: always
+ type: bool
+ sample: False
+purge:
+ description: Whether or not the purge flag was toggled.
+ returned: always
+ type: bool
+ sample: False
+flags:
+ description: Any flags passed in by the user.
+ returned: always
+ type: string
+ sample: "/e /purge"
+return_code:
+ description: The return code returned by robocopy.
+ returned: success
+ type: int
+ sample: 1
+output:
+ description: The output of running the robocopy command.
+ returned: success
+ type: string
+ sample: "-------------------------------------------------------------------------------\n ROBOCOPY :: Robust File Copy for Windows \n-------------------------------------------------------------------------------\n"
+msg:
+ description: Output interpreted into a concise message.
+ returned: always
+ type: string
+ sample: No files copied!
+changed:
+ description: Whether or not any changes were made.
+ returned: always
+ type: bool
+ sample: False
+'''
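
Since `return_code` surfaces robocopy's raw exit code, it helps to know that robocopy does not follow the usual zero-for-success convention: the exit code is a bitmask (1 = files copied, 2 = extra files, 4 = mismatches, 8 = copy failures, 16 = fatal error). A minimal sketch of interpreting it by hand (paths are illustrative):

```powershell
# Robocopy exit codes are a bitmask, not 0-for-success.
robocopy C:\DirectoryOne C:\DirectoryTwo /e
$rc = $LASTEXITCODE
if ($rc -ge 8)       { Write-Output "failed (code $rc)" }      # copy failures or a fatal error
elseif ($rc -band 1) { Write-Output "changed: files copied" }  # bit 0 set: something was copied
else                 { Write-Output "ok: nothing to copy" }
```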
diff --git a/windows/win_say.ps1 b/windows/win_say.ps1
new file mode 100644
index 00000000000..2a1a0c18aa5
--- /dev/null
+++ b/windows/win_say.ps1
@@ -0,0 +1,106 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2016, Jon Hawkesworth (@jhawkesworth)
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+$result = New-Object PSObject;
+$msg = Get-AnsibleParam -obj $params -name "msg"
+$msg_file = Get-AnsibleParam -obj $params -name "msg_file"
+$start_sound_path = Get-AnsibleParam -obj $params -name "start_sound_path"
+$end_sound_path = Get-AnsibleParam -obj $params -name "end_sound_path"
+$voice = Get-AnsibleParam -obj $params -name "voice"
+$speech_speed = Get-AnsibleParam -obj $params -name "speech_speed"
+$speed = 0
+$words = $null
+
+if ($speech_speed -ne $null) {
+ try {
+ $speed = [convert]::ToInt32($speech_speed, 10)
+ } catch {
+ Fail-Json $result "speech_speed needs to a integer in the range -10 to 10. The value $speech_speed could not be converted to an integer."
+
+ }
+ if ($speed -lt -10 -or $speed -gt 10) {
+ Fail-Json $result "speech_speed needs to a integer in the range -10 to 10. The value $speech_speed is outside this range."
+ }
+}
+
+
+if ($msg_file -ne $null -and $msg -ne $null ) {
+ Fail-Json $result "Please specify either msg_file or msg parameters, not both"
+}
+
+if ($msg_file -eq $null -and $msg -eq $null -and $start_sound_path -eq $null -and $end_sound_path -eq $null) {
+ Fail-Json $result "No msg_file, msg, start_sound_path, or end_sound_path parameters have been specified. Please specify at least one so the module has something to do"
+
+}
+
+
+if ($msg_file -ne $null) {
+ if (Test-Path $msg_file) {
+ $words = Get-Content $msg_file | Out-String
+ } else {
+ Fail-Json $result "Message file $msg_file could not be found or opened. Ensure you have specified the full path to the file, and the ansible windows user has permission to read the file."
+ }
+}
+
+if ($start_sound_path -ne $null) {
+ if (Test-Path $start_sound_path) {
+ (new-object Media.SoundPlayer $start_sound_path).playSync();
+ } else {
+ Fail-Json $result "Start sound file $start_sound_path could not be found or opened. Ensure you have specified the full path to the file, and the ansible windows user has permission to read the file."
+ }
+}
+
+if ($msg -ne $null) {
+ $words = $msg
+}
+
+if ($words -ne $null) {
+ Add-Type -AssemblyName System.speech
+ $tts = New-Object System.Speech.Synthesis.SpeechSynthesizer
+ if ($voice -ne $null) {
+ try {
+ $tts.SelectVoice($voice)
+ } catch [System.Management.Automation.MethodInvocationException] {
+ Set-Attr $result "voice_info" "Could not load voice $voice, using system default voice."
+ }
+ }
+
+ Set-Attr $result "voice" $tts.Voice.Name
+ if ($speed -ne 0) {
+ $tts.Rate = $speed
+ }
+ $tts.Speak($words)
+ $tts.Dispose()
+}
+
+if ($end_sound_path -ne $null) {
+ if (Test-Path $end_sound_path) {
+ (new-object Media.SoundPlayer $end_sound_path).playSync();
+ } else {
+ Fail-Json $result "End sound file $start_sound_path could not be found or opened. Ensure you have specified the full path to the file, and the ansible windows user has permission to read the file."
+ }
+}
+
+Set-Attr $result "changed" $false;
+Set-Attr $result "message_text" $words;
+
+Exit-Json $result;
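
The module is a thin wrapper over .NET's `System.Speech`; a minimal standalone sketch for testing text-to-speech on a target host (message text is illustrative):

```powershell
# Same .NET API the module drives, reduced to the essentials.
Add-Type -AssemblyName System.Speech
$tts = New-Object System.Speech.Synthesis.SpeechSynthesizer
$tts.Rate = 2                       # -10 (slowest) .. 10 (fastest), as with speech_speed
$tts.Speak("Hello from Ansible")    # blocks until the speech completes
$tts.Dispose()
```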
diff --git a/windows/win_say.py b/windows/win_say.py
new file mode 100644
index 00000000000..61fa74b9c87
--- /dev/null
+++ b/windows/win_say.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Jon Hawkesworth (@jhawkesworth)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: win_say
+version_added: "2.3"
+short_description: Text to speech module for Windows to speak messages and optionally play sounds
+description:
+ - Uses .NET libraries to convert text to speech and optionally play .wav sounds. Audio Service needs to be running and some kind of speakers or headphones need to be attached to the windows target(s) for the speech to be audible.
+options:
+ msg:
+ description:
+ - The text to be spoken. Use either msg or msg_file. Optional so that you can use this module just to play sounds.
+ required: false
+ default: none
+ msg_file:
+ description:
+ - Full path to a Windows-format text file containing the text to be spoken. Use either msg or msg_file. Optional so that you can use this module just to play sounds.
+ required: false
+ default: none
+ voice:
+ description:
+ - Which voice to use. See notes for how to discover installed voices. If the requested voice is not available the default voice will be used. Example voice names from Windows 10 are 'Microsoft Zira Desktop' and 'Microsoft Hazel Desktop'.
+ required: false
+ default: system default voice
+ speech_speed:
+ description:
+ - How fast or slow to speak the text. Must be an integer value in the range -10 to 10. -10 is slowest, 10 is fastest.
+ required: false
+ default: 0
+ start_sound_path:
+ description:
+ - Full path to a C(.wav) file containing a sound to play before the text is spoken. Useful on conference calls to alert other speakers that ansible has something to say.
+ required: false
+ default: null
+ end_sound_path:
+ description:
+ - Full path to a C(.wav) file containing a sound to play after the text has been spoken. Useful on conference calls to alert other speakers that ansible has finished speaking.
+ required: false
+ default: null
+author: "Jon Hawkesworth (@jhawkesworth)"
+notes:
+ - Needs speakers or headphones to do anything useful.
+ - To find which voices are installed, run the following powershell
+ Add-Type -AssemblyName System.Speech
+ $speech = New-Object -TypeName System.Speech.Synthesis.SpeechSynthesizer
+ $speech.GetInstalledVoices() | ForEach-Object { $_.VoiceInfo }
+ $speech.Dispose()
+ - Speech can be surprisingly slow, so it's best to keep message text short.
+'''
+
+EXAMPLES = '''
+ # Warn of impending deployment
+- win_say:
+ msg: Warning, deployment commencing in 5 minutes, please log out.
+ # Using a different voice and a start sound
+- win_say:
+ start_sound_path: 'C:\Windows\Media\ding.wav'
+ msg: Warning, deployment commencing in 5 minutes, please log out.
+ voice: Microsoft Hazel Desktop
+ # example with start and end sound
+- win_say:
+ start_sound_path: 'C:\Windows\Media\Windows Balloon.wav'
+ msg: "New software installed"
+ end_sound_path: 'C:\Windows\Media\chimes.wav'
+ # text from file example
+- win_say:
+ start_sound_path: 'C:\Windows\Media\Windows Balloon.wav'
+ msg_file: AppData\Local\Temp\morning_report.txt
+ end_sound_path: 'C:\Windows\Media\chimes.wav'
+'''
+RETURN = '''
+message_text:
+ description: the text that the module attempted to speak
+ returned: success
+ type: string
+ sample: "Warning, deployment commencing in 5 minutes."
+voice:
+ description: the voice used to speak the text.
+ returned: success
+ type: string
+ sample: Microsoft Hazel Desktop
+voice_info:
+ description: the voice used to speak the text.
+ returned: when requested voice could not be loaded
+ type: string
+ sample: Could not load voice TestVoice, using system default voice
+'''
+
diff --git a/windows/win_scheduled_task.ps1 b/windows/win_scheduled_task.ps1
index b63bd130134..70ba45e29d3 100644
--- a/windows/win_scheduled_task.ps1
+++ b/windows/win_scheduled_task.ps1
@@ -23,69 +23,52 @@ $ErrorActionPreference = "Stop"
# POWERSHELL_COMMON
$params = Parse-Args $args;
+
+$days_of_week = Get-AnsibleParam $params -name "days_of_week"
+$enabled = Get-AnsibleParam $params -name "enabled" -default $true
+$enabled = $enabled | ConvertTo-Bool
+$description = Get-AnsibleParam $params -name "description" -default " "
+$path = Get-AnsibleParam $params -name "path"
+$argument = Get-AnsibleParam $params -name "argument"
+
$result = New-Object PSObject;
Set-Attr $result "changed" $false;
#Required vars
-$name = Get-Attr -obj $params -name name -failifempty $true -resultobj $result
-$state = Get-Attr -obj $params -name state -failifempty $true -resultobj $result
-if( ($state -ne "present") -and ($state -ne "absent") ) {
- Fail-Json $result "state must be present or absent"
-}
+$name = Get-AnsibleParam -obj $params -name name -failifempty $true -resultobj $result
+$state = Get-AnsibleParam -obj $params -name state -failifempty $true -resultobj $result -validateSet "present","absent"
#Vars conditionally required
-if($state -eq "present") {
- $execute = Get-Attr -obj $params -name execute -failifempty $true -resultobj $result
- $frequency = Get-Attr -obj $params -name frequency -failifempty $true -resultobj $result
- $time = Get-Attr -obj $params -name time -failifempty $true -resultobj $result
- $user = Get-Attr -obj $params -name user -failifempty $true -resultobj $result
-}
-if ($params.days_of_week)
-{
- $days_of_week = $params.days_of_week
-}
-elseif ($frequency -eq "weekly")
-{
- Fail-Json $result "missing required argument: days_of_week"
-}
+$present_args_required = $state -eq "present"
+$execute = Get-AnsibleParam -obj $params -name execute -failifempty $present_args_required -resultobj $result
+$frequency = Get-AnsibleParam -obj $params -name frequency -failifempty $present_args_required -resultobj $result
+$time = Get-AnsibleParam -obj $params -name time -failifempty $present_args_required -resultobj $result
+$user = Get-AnsibleParam -obj $params -name user -failifempty $present_args_required -resultobj $result
-# Vars with defaults
-if ($params.enabled)
-{
- $enabled = $params.enabled | ConvertTo-Bool
-}
-else
-{
- $enabled = $true #default
-}
-if ($params.description)
-{
- $description = $params.description
-}
-else
+
+# Mandatory Vars
+if ($frequency -eq "weekly")
{
- $description = " " #default
+ if (!($days_of_week))
+ {
+ Fail-Json $result "missing required argument: days_of_week"
+ }
}
-if ($params.path)
+
+if ($path)
{
- $path = "\{0}\" -f $params.path
+ $path = "\{0}\" -f $path
}
else
{
$path = "\" #default
}
-# Optional vars
-if ($params.argument)
-{
- $argument = $params.argument
-}
-
try {
$task = Get-ScheduledTask -TaskPath "$path" | Where-Object {$_.TaskName -eq "$name"}
# Correlate task state to enable variable, used to calculate if state needs to be changed
- $taskState = $task.State
+ $taskState = if ($task) { $task.State } else { $null }
if ($taskState -eq "Ready"){
$taskState = $true
}
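
The refactor above replaces hand-rolled `$params.foo` checks with `Get-AnsibleParam`. The conditional-required pattern is worth calling out, since it is reused for every option that is only mandatory when `state=present`:

```powershell
# Pattern from the diff above: options mandatory only when state=present.
$state = Get-AnsibleParam -obj $params -name state -failifempty $true -resultobj $result -validateSet "present","absent"
$present_args_required = $state -eq "present"
# Fails the module only when state=present and the option is missing;
# otherwise the variable is simply $null.
$execute = Get-AnsibleParam -obj $params -name execute -failifempty $present_args_required -resultobj $result
```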
diff --git a/windows/win_scheduled_task.py b/windows/win_scheduled_task.py
index e26cbc00cf0..96a9b48f951 100644
--- a/windows/win_scheduled_task.py
+++ b/windows/win_scheduled_task.py
@@ -18,6 +18,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_scheduled_task
@@ -25,6 +29,8 @@
short_description: Manage scheduled tasks
description:
- Manage scheduled tasks
+notes:
+ - This module requires Windows Server 2012 or later.
options:
name:
description:
@@ -77,11 +83,20 @@
required: false
path:
description:
- - Folder path of scheduled task
+ - Task folder in which this task will be stored
default: '\'
'''
EXAMPLES = '''
- # Create a scheduled task to open a command prompt
- win_scheduled_task: name="TaskName" execute="cmd" frequency="daily" time="9am" description="open command prompt" path="example" enable=yes state=present user=SYSTEM
+# Create a scheduled task to open a command prompt
+- win_scheduled_task:
+ name: TaskName
+ execute: cmd
+ frequency: daily
+ time: 9am
+ description: open command prompt
+ path: example
+ enabled: yes
+ state: present
+ user: SYSTEM
'''
diff --git a/windows/win_share.ps1 b/windows/win_share.ps1
new file mode 100644
index 00000000000..59e4e8ab810
--- /dev/null
+++ b/windows/win_share.ps1
@@ -0,0 +1,251 @@
+#!powershell
+# This file is part of Ansible
+
+# Copyright 2015, Hans-Joachim Kliemeck
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+#Functions
+Function UserSearch
+{
+ Param ([string]$accountName)
+ #Check if there's a realm specified
+
+ $searchDomain = $false
+ $searchDomainUPN = $false
+ if ($accountName.Split("\").count -gt 1)
+ {
+ if ($accountName.Split("\")[0] -ne $env:COMPUTERNAME)
+ {
+ $searchDomain = $true
+ $accountName = $accountName.split("\")[1]
+ }
+ }
+ Elseif ($accountName.contains("@"))
+ {
+ $searchDomain = $true
+ $searchDomainUPN = $true
+ }
+ Else
+ {
+ #Default to local user account
+ $accountName = $env:COMPUTERNAME + "\" + $accountName
+ }
+
+ if ($searchDomain -eq $false)
+ {
+ # do not use Win32_UserAccount, because e.g. SYSTEM (BUILTIN\SYSTEM or COMPUTERNAME\SYSTEM) will not be listed; Win32_Account also lists groups
+ $localaccount = get-wmiobject -class "Win32_Account" -namespace "root\CIMV2" -filter "(LocalAccount = True)" | where {$_.Caption -eq $accountName}
+ if ($localaccount)
+ {
+ return $localaccount.SID
+ }
+ }
+ Else
+ {
+ #Search by samaccountname
+ $Searcher = [adsisearcher]""
+
+ If ($searchDomainUPN -eq $false) {
+ $Searcher.Filter = "sAMAccountName=$($accountName)"
+ }
+ Else {
+ $Searcher.Filter = "userPrincipalName=$($accountName)"
+ }
+
+ $result = $Searcher.FindOne()
+ if ($result)
+ {
+ $user = $result.GetDirectoryEntry()
+
+ # get binary SID from AD account
+ $binarySID = $user.ObjectSid.Value
+
+ # convert to string SID
+ return (New-Object System.Security.Principal.SecurityIdentifier($binarySID,0)).Value
+ }
+ }
+}
+Function NormalizeAccounts
+{
+ param(
+ [parameter(valuefrompipeline=$true)]
+ $users
+ )
+
+ $users = $users.Trim()
+ If ($users -eq "") {
+ $splittedUsers = [Collections.Generic.List[String]] @()
+ }
+ Else {
+ $splittedUsers = [Collections.Generic.List[String]] $users.Split(",")
+ }
+
+ $normalizedUsers = [Collections.Generic.List[String]] @()
+ ForEach($splittedUser in $splittedUsers) {
+ $sid = UserSearch $splittedUser
+ If (!$sid) {
+ Fail-Json $result "$splittedUser is not a valid user or group on the host machine or domain"
+ }
+
+ $normalizedUser = (New-Object System.Security.Principal.SecurityIdentifier($sid)).Translate([System.Security.Principal.NTAccount])
+ $normalizedUsers.Add($normalizedUser)
+ }
+
+ return ,$normalizedUsers
+}
+
+$params = Parse-Args $args;
+
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
+
+$name = Get-Attr $params "name" -failifempty $true
+$state = Get-Attr $params "state" "present" -validateSet "present","absent" -resultobj $result
+
+Try {
+ $share = Get-SmbShare $name -ErrorAction SilentlyContinue
+ If ($state -eq "absent") {
+ If ($share) {
+ Remove-SmbShare -Force -Name $name
+ Set-Attr $result "changed" $true;
+ }
+ }
+ Else {
+ $path = Get-Attr $params "path" -failifempty $true
+ $description = Get-Attr $params "description" ""
+
+ $permissionList = Get-Attr $params "list" "no" -validateSet "no","yes" -resultobj $result | ConvertTo-Bool
+ $folderEnum = if ($permissionList) { "Unrestricted" } else { "AccessBased" }
+
+ $permissionRead = Get-Attr $params "read" "" | NormalizeAccounts
+ $permissionChange = Get-Attr $params "change" "" | NormalizeAccounts
+ $permissionFull = Get-Attr $params "full" "" | NormalizeAccounts
+ $permissionDeny = Get-Attr $params "deny" "" | NormalizeAccounts
+
+ If (-Not (Test-Path -Path $path)) {
+ Fail-Json $result "$path directory does not exist on the host"
+ }
+
+ # normalize path and remove slash at the end
+ $path = (Get-Item $path).FullName -replace "\\$"
+
+ # need to (re-)create share
+ If (!$share) {
+ New-SmbShare -Name $name -Path $path
+ $share = Get-SmbShare $name -ErrorAction SilentlyContinue
+
+ Set-Attr $result "changed" $true;
+ }
+ If ($share.Path -ne $path) {
+ Remove-SmbShare -Force -Name $name
+
+ New-SmbShare -Name $name -Path $path
+ $share = Get-SmbShare $name -ErrorAction SilentlyContinue
+
+ Set-Attr $result "changed" $true;
+ }
+
+ # updates
+ If ($share.Description -ne $description) {
+ Set-SmbShare -Force -Name $name -Description $description
+ Set-Attr $result "changed" $true;
+ }
+ If ($share.FolderEnumerationMode -ne $folderEnum) {
+ Set-SmbShare -Force -Name $name -FolderEnumerationMode $folderEnum
+ Set-Attr $result "changed" $true;
+ }
+
+ # clean permissions that imply others
+ ForEach ($user in $permissionFull) {
+ $permissionChange.remove($user)
+ $permissionRead.remove($user)
+ }
+ ForEach ($user in $permissionChange) {
+ $permissionRead.remove($user)
+ }
+
+ # remove permissions
+ $permissions = Get-SmbShareAccess -Name $name
+ ForEach ($permission in $permissions) {
+ If ($permission.AccessControlType -eq "Deny") {
+ If (!$permissionDeny.Contains($permission.AccountName)) {
+ Unblock-SmbShareAccess -Force -Name $name -AccountName $permission.AccountName
+ Set-Attr $result "changed" $true;
+ }
+ }
+ ElseIf ($permission.AccessControlType -eq "Allow") {
+ If ($permission.AccessRight -eq "Full") {
+ If (!$permissionFull.Contains($permission.AccountName)) {
+ Revoke-SmbShareAccess -Force -Name $name -AccountName $permission.AccountName
+ Set-Attr $result "changed" $true;
+
+ Continue
+ }
+
+ # user got requested permissions
+ $permissionFull.remove($permission.AccountName)
+ }
+ ElseIf ($permission.AccessRight -eq "Change") {
+ If (!$permissionChange.Contains($permission.AccountName)) {
+ Revoke-SmbShareAccess -Force -Name $name -AccountName $permission.AccountName
+ Set-Attr $result "changed" $true;
+
+ Continue
+ }
+
+ # user got requested permissions
+ $permissionChange.remove($permission.AccountName)
+ }
+ ElseIf ($permission.AccessRight -eq "Read") {
+ If (!$permissionRead.Contains($permission.AccountName)) {
+ Revoke-SmbShareAccess -Force -Name $name -AccountName $permission.AccountName
+ Set-Attr $result "changed" $true;
+
+ Continue
+ }
+
+ # user got requested permissions
+ $permissionRead.Remove($permission.AccountName)
+ }
+ }
+ }
+
+ # add missing permissions
+ ForEach ($user in $permissionRead) {
+ Grant-SmbShareAccess -Force -Name $name -AccountName $user -AccessRight "Read"
+ Set-Attr $result "changed" $true;
+ }
+ ForEach ($user in $permissionChange) {
+ Grant-SmbShareAccess -Force -Name $name -AccountName $user -AccessRight "Change"
+ Set-Attr $result "changed" $true;
+ }
+ ForEach ($user in $permissionFull) {
+ Grant-SmbShareAccess -Force -Name $name -AccountName $user -AccessRight "Full"
+ Set-Attr $result "changed" $true;
+ }
+ ForEach ($user in $permissionDeny) {
+ Block-SmbShareAccess -Force -Name $name -AccountName $user
+ Set-Attr $result "changed" $true;
+ }
+ }
+}
+Catch {
+ Fail-Json $result "an error occured when attempting to create share $name"
+}
+
+Exit-Json $result
\ No newline at end of file
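
The module reconciles share-level ACLs with the `SmbShare` cmdlets, treating `full` as implying `change` and `read` (which is why implied entries are stripped before the comparison). The same cmdlets can be run by hand to inspect or adjust a share (share and account names are illustrative):

```powershell
Get-SmbShareAccess -Name internal                                     # current share-level ACL
Grant-SmbShareAccess  -Force -Name internal -AccountName HR-Global -AccessRight Read
Revoke-SmbShareAccess -Force -Name internal -AccountName OldTeam      # drop an Allow entry
Block-SmbShareAccess  -Force -Name internal -AccountName HR-External  # add a Deny entry
```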
diff --git a/windows/win_share.py b/windows/win_share.py
new file mode 100644
index 00000000000..bca7646cf3f
--- /dev/null
+++ b/windows/win_share.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2015, Hans-Joachim Kliemeck
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: win_share
+version_added: "2.1"
+short_description: Manage Windows shares
+description:
+ - Add, modify or remove Windows share and set share permissions.
+requirements:
+ - Windows 8.1 / Windows 2012 or newer
+options:
+ name:
+ description:
+ - Share name
+ required: yes
+ path:
+ description:
+ - Share directory
+ required: yes
+ state:
+ description:
+ - Specify whether to add (C(present)) or remove (C(absent)) the specified share
+ required: no
+ choices:
+ - present
+ - absent
+ default: present
+ description:
+ description:
+ - Share description
+ required: no
+ default: none
+ list:
+ description:
+ - Specify whether to allow or deny file listing for users who have no permission on the share
+ required: no
+ choices:
+ - yes
+ - no
+ default: none
+ read:
+ description:
+ - Comma-separated list of users that should get read access on the share.
+ required: no
+ default: none
+ change:
+ description:
+ - Comma-separated list of users that should get read and write access on the share.
+ required: no
+ default: none
+ full:
+ description:
+ - Comma-separated list of users that should get full access on the share.
+ required: no
+ default: none
+ deny:
+ description:
+ - Comma-separated list of users that should get no access, regardless of implied access on the share.
+ required: no
+ default: none
+author: Hans-Joachim Kliemeck (@h0nIg)
+'''
+
+EXAMPLES = '''
+# Playbook example
+# Add share and set permissions
+---
+- name: Add secret share
+ win_share:
+ name: internal
+ description: top secret share
+ path: C:/shares/internal
+ list: 'no'
+ full: Administrators,CEO
+ read: HR-Global
+ deny: HR-External
+
+- name: Add public company share
+ win_share:
+ name: company
+ description: public company share
+ path: C:/shares/company
+ list: 'yes'
+ full: Administrators,CEO
+ read: Global
+
+# Remove previously added share
+- win_share:
+ name: internal
+ state: absent
+'''
+
+RETURN = '''
+
+'''
\ No newline at end of file
diff --git a/windows/win_timezone.ps1 b/windows/win_timezone.ps1
new file mode 100644
index 00000000000..03a6935052d
--- /dev/null
+++ b/windows/win_timezone.ps1
@@ -0,0 +1,71 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Phil Schwartz
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+$result = New-Object psobject @{
+ win_timezone = New-Object psobject
+ changed = $false
+}
+
+$timezone = Get-Attr -obj $params -name timezone -failifempty $true -resultobj $result
+
+Try {
+ # Get the current timezone set
+ $currentTZ = $(tzutil.exe /g)
+ If ($LASTEXITCODE -ne 0) { Throw "An error occured when getting the current machine's timezone setting." }
+
+ If ( $currentTZ -eq $timezone ) {
+ Exit-Json $result "$timezone is already set on this machine"
+ }
+ Else {
+ $tzExists = $false
+ #Check that timezone can even be set (if it is listed from tzutil as an available timezone to the machine)
+ $tzList = $(tzutil.exe /l)
+ If ($LASTEXITCODE -ne 0) { Throw "An error occured when listing the available timezones." }
+ ForEach ($tz in $tzList) {
+ If ( $tz -eq $timezone ) {
+ $tzExists = $true
+ break
+ }
+ }
+
+ If ( $tzExists ) {
+ tzutil.exe /s "$timezone"
+ If ($LASTEXITCODE -ne 0) { Throw "An error occured when setting the specified timezone with tzutil." }
+ $newTZ = $(tzutil.exe /g)
+ If ($LASTEXITCODE -ne 0) { Throw "An error occured when getting the current machine's timezone setting." }
+
+ If ( $timezone -eq $newTZ ) {
+ $result.changed = $true
+ }
+ }
+ Else {
+ Fail-Json $result "The specified timezone: $timezone isn't supported on the machine."
+ }
+ }
+}
+Catch {
+ Fail-Json $result "Error setting timezone to: $timezone."
+}
+
+
+Exit-Json $result;
\ No newline at end of file
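
The module shells out to `tzutil.exe` for everything; the three invocations it depends on can be run interactively to debug a failing host:

```powershell
tzutil.exe /g                          # print the current timezone
tzutil.exe /l                          # list every timezone the host supports
tzutil.exe /s "Central Standard Time"  # set the timezone
```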
diff --git a/windows/win_timezone.py b/windows/win_timezone.py
new file mode 100644
index 00000000000..02b9bb9c457
--- /dev/null
+++ b/windows/win_timezone.py
@@ -0,0 +1,53 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Phil Schwartz
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: win_timezone
+version_added: "2.1"
+short_description: Sets Windows machine timezone
+description:
+ - Sets machine time to the specified timezone, the module will check if the provided timezone is supported on the machine.
+options:
+ timezone:
+ description:
+ - Timezone to set to. For example, C(Central Standard Time).
+ required: true
+ default: null
+ aliases: []
+
+author: Phil Schwartz
+'''
+
+
+EXAMPLES = '''
+ # Set machine's timezone to Central Standard Time
+ win_timezone:
+ timezone: "Central Standard Time"
+'''
+
+RETURN = '''# '''
diff --git a/windows/win_unzip.ps1 b/windows/win_unzip.ps1
index a62f246f5c8..59fbd33166c 100644
--- a/windows/win_unzip.ps1
+++ b/windows/win_unzip.ps1
@@ -19,6 +19,7 @@
# WANT_JSON
# POWERSHELL_COMMON
+
$params = Parse-Args $args;
$result = New-Object psobject @{
@@ -26,64 +27,47 @@ $result = New-Object psobject @{
changed = $false
}
-If ($params.creates) {
+$creates = Get-AnsibleParam -obj $params -name "creates"
+If ($creates -ne $null) {
If (Test-Path $params.creates) {
Exit-Json $result "The 'creates' file or directory already exists."
}
-
}
-If ($params.src) {
- $src = $params.src.toString()
-
- If (-Not (Test-Path -path $src)){
- Fail-Json $result "src file: $src does not exist."
- }
-
- $ext = [System.IO.Path]::GetExtension($src)
-}
-Else {
- Fail-Json $result "missing required argument: src"
+$src = Get-AnsibleParam -obj $params -name "src" -failifempty $true
+If (-Not (Test-Path -path $src)){
+ Fail-Json $result "src file: $src does not exist."
}
-If (-Not($params.dest -eq $null)) {
- $dest = $params.dest.toString()
+$ext = [System.IO.Path]::GetExtension($src)
- If (-Not (Test-Path $dest -PathType Container)){
- Try{
- New-Item -itemtype directory -path $dest
- }
- Catch {
- Fail-Json $result "Error creating $dest directory"
- }
- }
-}
-Else {
- Fail-Json $result "missing required argument: dest"
-}
-If ($params.recurse) {
- $recurse = ConvertTo-Bool ($params.recurse)
-}
-Else {
- $recurse = $false
+$dest = Get-AnsibleParam -obj $params -name "dest" -failifempty $true
+If (-Not (Test-Path $dest -PathType Container)){
+ Try{
+ New-Item -itemtype directory -path $dest
+ }
+ Catch {
+ $err_msg = $_.Exception.Message
+ Fail-Json $result "Error creating $dest directory! Msg: $err_msg"
+ }
}
-If ($params.rm) {
- $rm = ConvertTo-Bool ($params.rm)
-}
-Else {
- $rm = $false
-}
+$recurse = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "recurse" -default "false")
+$rm = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "rm" -default "false")
If ($ext -eq ".zip" -And $recurse -eq $false) {
Try {
$shell = New-Object -ComObject Shell.Application
- $shell.NameSpace($dest).copyhere(($shell.NameSpace($src)).items(), 20)
+ $zipPkg = $shell.NameSpace([IO.Path]::GetFullPath($src))
+ $destPath = $shell.NameSpace([IO.Path]::GetFullPath($dest))
+ # 20 means do not display any dialog (4) and overwrite any file (16)
+ $destPath.CopyHere($zipPkg.Items(), 20)
$result.changed = $true
}
Catch {
- Fail-Json $result "Error unzipping $src to $dest"
+ $err_msg = $_.Exception.Message
+ Fail-Json $result "Error unzipping $src to $dest! Msg: $err_msg"
}
}
# Requires PSCX
@@ -127,11 +111,12 @@ Else {
}
}
Catch {
+ $err_msg = $_.Exception.Message
If ($recurse) {
- Fail-Json $result "Error recursively expanding $src to $dest"
+ Fail-Json $result "Error recursively expanding $src to $dest! Msg: $err_msg"
}
Else {
- Fail-Json $result "Error expanding $src to $dest"
+ Fail-Json $result "Error expanding $src to $dest! Msg: $err_msg"
}
}
}
@@ -154,4 +139,4 @@ Set-Attr $result.win_unzip "src" $src.toString()
Set-Attr $result.win_unzip "dest" $dest.toString()
Set-Attr $result.win_unzip "recurse" $recurse.toString()
-Exit-Json $result;
\ No newline at end of file
+Exit-Json $result;
diff --git a/windows/win_unzip.py b/windows/win_unzip.py
index aa0180baf74..708a909820b 100644
--- a/windows/win_unzip.py
+++ b/windows/win_unzip.py
@@ -21,6 +21,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_unzip
@@ -65,9 +69,9 @@
author: Phil Schwartz
'''
-EXAMPLES = '''
+EXAMPLES = r'''
# This unzips a library that was downloaded with win_get_url, and removes the file after extraction
-$ ansible -i hosts -m win_unzip -a "src=C:\\LibraryToUnzip.zip dest=C:\\Lib rm=true" all
+$ ansible -i hosts -m win_unzip -a "src=C:\LibraryToUnzip.zip dest=C:\Lib rm=true" all
# Playbook example
# Simple unzip
@@ -95,12 +99,12 @@
- name: Grab PSCX msi
win_get_url:
url: 'http://download-codeplex.sec.s-msft.com/Download/Release?ProjectName=pscx&DownloadId=923562&FileTime=130585918034470000&Build=20959'
- dest: 'C:\\pscx.msi'
+ dest: 'C:\pscx.msi'
- name: Install PSCX
win_msi:
- path: 'C:\\pscx.msi'
+ path: 'C:\pscx.msi'
- name: Unzip gz log
win_unzip:
- src: "C:\\Logs\\application-error-logs.gz"
- dest: "C:\\ExtractedLogs\\application-error-logs"
+ src: "C:\Logs\application-error-logs.gz"
+ dest: "C:\ExtractedLogs\application-error-logs"
'''
diff --git a/windows/win_updates.ps1 b/windows/win_updates.ps1
index 92c1b93e1f8..a74e68f3663 100644
--- a/windows/win_updates.ps1
+++ b/windows/win_updates.ps1
@@ -1,7 +1,7 @@
#!powershell
# This file is part of Ansible
#
-# Copyright 2014, Trond Hindenes
+# Copyright 2015, Matt Davis
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,68 +19,406 @@
# WANT_JSON
# POWERSHELL_COMMON
-function Write-Log
-{
- param
- (
- [parameter(mandatory=$false)]
- [System.String]
- $message
- )
+$ErrorActionPreference = "Stop"
+$FormatEnumerationLimit = -1 # prevent out-string et al from truncating collection dumps
- $date = get-date -format 'yyyy-MM-dd hh:mm:ss.zz'
+<# Most of the Windows Update Agent API will not run under a remote token,
+which a remote WinRM session always has. win_updates uses the Task Scheduler
+to run the bulk of the update functionality under a local token. Powershell's
+Scheduled-Job capability provides a decent abstraction over the Task Scheduler
+and handles marshaling Powershell args in and output/errors/etc back. The
+module schedules a single job that executes all interactions with the Update
+Agent API, then waits for completion. A significant amount of hassle is
+involved to ensure that only one of these jobs is running at a time, and to
+clean up the various error conditions that can occur. #>
- Write-Host "$date $message"
+# define the ScriptBlock that will be passed to Register-ScheduledJob
+$job_body = {
+ Param(
+ [hashtable]$boundparms=@{},
+ [Object[]]$unboundargs=$()
+ )
- Out-File -InputObject "$date $message" -FilePath $global:LoggingFile -Append
+ Set-StrictMode -Version 2
+
+ $ErrorActionPreference = "Stop"
+ $DebugPreference = "Continue"
+ $FormatEnumerationLimit = -1 # prevent out-string et al from truncating collection dumps
+
+ # set this as a global for the Write-DebugLog function
+ $log_path = $boundparms['log_path']
+
+ Write-DebugLog "Scheduled job started with boundparms $($boundparms | out-string) and unboundargs $($unboundargs | out-string)"
+
+ # FUTURE: elevate this to module arg validation once we have it
+ Function MapCategoryNameToGuid {
+ Param([string] $category_name)
+
+ $category_guid = switch -exact ($category_name) {
+ # as documented by TechNet @ https://technet.microsoft.com/en-us/library/ff730937.aspx
+ "Application" {"5C9376AB-8CE6-464A-B136-22113DD69801"}
+ "Connectors" {"434DE588-ED14-48F5-8EED-A15E09A991F6"}
+ "CriticalUpdates" {"E6CF1350-C01B-414D-A61F-263D14D133B4"}
+ "DefinitionUpdates" {"E0789628-CE08-4437-BE74-2495B842F43B"}
+ "DeveloperKits" {"E140075D-8433-45C3-AD87-E72345B36078"}
+ "FeaturePacks" {"B54E7D24-7ADD-428F-8B75-90A396FA584F"}
+ "Guidance" {"9511D615-35B2-47BB-927F-F73D8E9260BB"}
+ "SecurityUpdates" {"0FA1201D-4330-4FA8-8AE9-B877473B6441"}
+ "ServicePacks" {"68C5B0A3-D1A6-4553-AE49-01D3A7827828"}
+ "Tools" {"B4832BD8-E735-4761-8DAF-37F882276DAB"}
+ "UpdateRollups" {"28BC880E-0592-4CBF-8F95-C79B17911D5F"}
+ "Updates" {"CD5FFD1E-E932-4E3A-BF74-18BF0B1BBD83"}
+ default { throw "Unknown category_name $category_name, must be one of (Application,Connectors,CriticalUpdates,DefinitionUpdates,DeveloperKits,FeaturePacks,Guidance,SecurityUpdates,ServicePacks,Tools,UpdateRollups,Updates)" }
+ }
+
+ return $category_guid
+ }
+
+ Function DoWindowsUpdate {
+ Param(
+ [string[]]$category_names=@("CriticalUpdates","SecurityUpdates","UpdateRollups"),
+ [ValidateSet("installed", "searched")]
+ [string]$state="installed",
+ [bool]$_ansible_check_mode=$false
+ )
+
+ $is_check_mode = $($state -eq "searched") -or $_ansible_check_mode
+
+ $category_guids = $category_names | % { MapCategoryNameToGUID $_ }
+
+ $update_status = @{ changed = $false }
+
+ Write-DebugLog "Creating Windows Update session..."
+ $session = New-Object -ComObject Microsoft.Update.Session
+
+ Write-DebugLog "Create Windows Update searcher..."
+ $searcher = $session.CreateUpdateSearcher()
+
+ # OR is only allowed at the top-level, so we have to repeat base criteria inside
+ # FUTURE: change this to client-side filtered?
+ $criteriabase = "IsInstalled = 0"
+ $criteria_list = $category_guids | % { "($criteriabase AND CategoryIDs contains '$_')" }
+
+ $criteria = [string]::Join(" OR ", $criteria_list)
+
+ Write-DebugLog "Search criteria: $criteria"
+
+ Write-DebugLog "Searching for updates to install in category IDs $category_guids..."
+ $searchresult = $searcher.Search($criteria)
+
+ Write-DebugLog "Creating update collection..."
+
+ $updates_to_install = New-Object -ComObject Microsoft.Update.UpdateColl
+
+ Write-DebugLog "Found $($searchresult.Updates.Count) updates"
+
+ $update_status.updates = @{ }
+
+ # FUTURE: add further filtering options
+ foreach($update in $searchresult.Updates) {
+ if(-Not $update.EulaAccepted) {
+ Write-DebugLog "Accepting EULA for $($update.Identity.UpdateID)"
+ $update.AcceptEula()
+ }
+
+ if($update.IsHidden) {
+ Write-DebugLog "Skipping hidden update $($update.Title)"
+ continue
+ }
+
+ Write-DebugLog "Adding update $($update.Identity.UpdateID) - $($update.Title)"
+ $res = $updates_to_install.Add($update)
+
+ $update_status.updates[$update.Identity.UpdateID] = @{
+ title = $update.Title
+ # TODO: pluck the first KB out (since most have just one)?
+ kb = $update.KBArticleIDs
+ id = $update.Identity.UpdateID
+ installed = $false
+ }
+ }
+
+ Write-DebugLog "Calculating pre-install reboot requirement..."
+
+ # calculate this early for check mode, and to see if we should allow updates to continue
+ $sysinfo = New-Object -ComObject Microsoft.Update.SystemInfo
+ $update_status.reboot_required = $sysinfo.RebootRequired
+ $update_status.found_update_count = $updates_to_install.Count
+ $update_status.installed_update_count = 0
+
+ # bail out here for check mode
+ if($is_check_mode -eq $true) {
+ Write-DebugLog "Check mode; exiting..."
+ Write-DebugLog "Return value: $($update_status | out-string)"
+
+ if($updates_to_install.Count -gt 0) { $update_status.changed = $true }
+ return $update_status
+ }
+
+ if($updates_to_install.Count -gt 0) {
+ if($update_status.reboot_required) {
+ throw "A reboot is required before more updates can be installed."
+ }
+ else {
+ Write-DebugLog "No reboot is pending..."
+ }
+ Write-DebugLog "Downloading updates..."
+ }
+
+ foreach($update in $updates_to_install) {
+ if($update.IsDownloaded) {
+ Write-DebugLog "Update $($update.Identity.UpdateID) already downloaded, skipping..."
+ continue
+ }
+ Write-DebugLog "Creating downloader object..."
+ $dl = $session.CreateUpdateDownloader()
+ Write-DebugLog "Creating download collection..."
+ $dl.Updates = New-Object -ComObject Microsoft.Update.UpdateColl
+ Write-DebugLog "Adding update $($update.Identity.UpdateID)"
+ $res = $dl.Updates.Add($update)
+ Write-DebugLog "Downloading update $($update.Identity.UpdateID)..."
+ $download_result = $dl.Download()
+ # FUTURE: configurable download retry
+ if($download_result.ResultCode -ne 2) { # OperationResultCode orcSucceeded
+ throw "Failed to download update $($update.Identity.UpdateID)"
+ }
+ }
+
+ if($updates_to_install.Count -lt 1 ) { return $update_status }
+
+ Write-DebugLog "Installing updates..."
+
+ # install as a batch so the reboot manager will suppress intermediate reboots
+ Write-DebugLog "Creating installer object..."
+ $inst = $session.CreateUpdateInstaller()
+ Write-DebugLog "Creating install collection..."
+ $inst.Updates = New-Object -ComObject Microsoft.Update.UpdateColl
+
+ foreach($update in $updates_to_install) {
+ Write-DebugLog "Adding update $($update.Identity.UpdateID)"
+ $res = $inst.Updates.Add($update)
+ }
+
+ # FUTURE: use BeginInstall w/ progress reporting so we can at least log intermediate install results
+ Write-DebugLog "Installing updates..."
+ $install_result = $inst.Install()
+
+ $update_success_count = 0
+ $update_fail_count = 0
+
+ # WU result API requires us to index in to get the install results
+ $update_index = 0
+
+ foreach($update in $updates_to_install) {
+ $update_result = $install_result.GetUpdateResult($update_index)
+ $update_resultcode = $update_result.ResultCode
+ $update_hresult = $update_result.HResult
+
+ $update_index++
+
+ $update_dict = $update_status.updates[$update.Identity.UpdateID]
+
+ if($update_resultcode -eq 2) { # OperationResultCode orcSucceeded
+ $update_success_count++
+ $update_dict.installed = $true
+ Write-DebugLog "Update $($update.Identity.UpdateID) succeeded"
+ }
+ else {
+ $update_fail_count++
+ $update_dict.installed = $false
+ $update_dict.failed = $true
+ $update_dict.failure_hresult_code = $update_hresult
+ Write-DebugLog "Update $($update.Identity.UpdateID) failed resultcode $update_hresult hresult $update_hresult"
+ }
+
+ }
+
+ if($update_fail_count -gt 0) {
+ $update_status.failed = $true
+ $update_status.msg="Failed to install one or more updates"
+ }
+ else { $update_status.changed = $true }
+
+ Write-DebugLog "Performing post-install reboot requirement check..."
+
+ # recalculate reboot status after installs
+ $sysinfo = New-Object -ComObject Microsoft.Update.SystemInfo
+ $update_status.reboot_required = $sysinfo.RebootRequired
+ $update_status.installed_update_count = $update_success_count
+ $update_status.failed_update_count = $update_fail_count
+
+ Write-DebugLog "Return value: $($update_status | out-string)"
+
+ return $update_status
+ }
+
+ Try {
+ # job system adds a bunch of cruft to top-level dict, so we have to send a sub-dict
+ return @{ job_output = DoWindowsUpdate @boundparms }
+ }
+ Catch {
+ $excep = $_
+ Write-DebugLog "Fatal exception: $($excep.Exception.Message) at $($excep.ScriptStackTrace)"
+ return @{ job_output = @{ failed=$true;error=$excep.Exception.Message;location=$excep.ScriptStackTrace } }
+ }
}
-$params = Parse-Args $args;
-$result = New-Object PSObject;
-Set-Attr $result "changed" $false;
+Function DestroyScheduledJob {
+ Param([string] $job_name)
+
+ # find a scheduled job with the same name (should normally fail)
+ $schedjob = Get-ScheduledJob -Name $job_name -ErrorAction SilentlyContinue
+
+ # nuke it if it's there
+ If($schedjob -ne $null) {
+ Write-DebugLog "ScheduledJob $job_name exists, ensuring it's not running..."
+ # can't manage jobs across sessions, so we have to resort to the Task Scheduler script object to kill running jobs
+ $schedserv = New-Object -ComObject Schedule.Service
+ Write-DebugLog "Connecting to scheduler service..."
+ $schedserv.Connect()
+ Write-DebugLog "Getting running tasks named $job_name"
+ $running_tasks = @($schedserv.GetRunningTasks(0) | Where-Object { $_.Name -eq $job_name })
+
+ Foreach($task_to_stop in $running_tasks) {
+ Write-DebugLog "Stopping running task $($task_to_stop.InstanceGuid)..."
+ $task_to_stop.Stop()
+ }
+
+ <# FUTURE: add a global waithandle for this to release any other waiters. Wait-Job
+ and/or polling will block forever, since the killed job object in the parent
+ session doesn't know it's been killed :( #>
+
+ Unregister-ScheduledJob -Name $job_name
+ }
-if(($params.logPath).Length -gt 0) {
- $global:LoggingFile = $params.logPath
-} else {
- $global:LoggingFile = "c:\ansible-playbook.log"
}
-if ($params.category) {
- $category = $params.category
-} else {
- $category = "critical"
+
+Function RunAsScheduledJob {
+ Param([scriptblock] $job_body, [string] $job_name, [scriptblock] $job_init, [Object[]] $job_arg_list=@())
+
+ DestroyScheduledJob -job_name $job_name
+
+ $rsj_args = @{
+ ScriptBlock = $job_body
+ Name = $job_name
+ ArgumentList = $job_arg_list
+ ErrorAction = "Stop"
+ ScheduledJobOption = @{ RunElevated=$True }
+ }
+
+ if($job_init) { $rsj_args.InitializationScript = $job_init }
+
+ Write-DebugLog "Registering scheduled job with args $($rsj_args | Out-String -Width 300)"
+ $schedjob = Register-ScheduledJob @rsj_args
+
+ # RunAsTask isn't available in PS3- fall back to a 2s future trigger
+ if($schedjob | Get-Member -Name RunAsTask) {
+ Write-DebugLog "Starting scheduled job (PS4 method)"
+ $schedjob.RunAsTask()
+ }
+ else {
+ Write-DebugLog "Starting scheduled job (PS3 method)"
+ Add-JobTrigger -inputobject $schedjob -trigger $(New-JobTrigger -once -at $(Get-Date).AddSeconds(2))
+ }
+
+ $sw = [System.Diagnostics.Stopwatch]::StartNew()
+
+ $job = $null
+
+ Write-DebugLog "Waiting for job completion..."
+
+ # Wait-Job can fail for a few seconds until the scheduled task starts- poll for it...
+ while ($job -eq $null) {
+ start-sleep -Milliseconds 100
+ if($sw.ElapsedMilliseconds -ge 30000) { # tasks scheduled right after boot on 2008R2 can take a while to start...
+ Throw "Timed out waiting for scheduled task to start"
+ }
+
+ # FUTURE: configurable timeout so we don't block forever?
+ # FUTURE: add a global WaitHandle in case another instance kills our job, so we don't block forever
+ $job = Wait-Job -Name $schedjob.Name -ErrorAction SilentlyContinue
+ }
+
+ $sw = [System.Diagnostics.Stopwatch]::StartNew()
+
+ # NB: output from scheduled jobs is delayed after completion (including the sub-objects after the primary Output object is available)
+ While (($job.Output -eq $null -or -not ($job.Output | Get-Member -Name Keys -ErrorAction Ignore) -or -not $job.Output.Keys.Contains('job_output')) -and $sw.ElapsedMilliseconds -lt 15000) {
+ Write-DebugLog "Waiting for job output to populate..."
+ Start-Sleep -Milliseconds 500
+ }
+
+ # NB: fallthru on both timeout and success
+
+ $ret = @{
+ ErrorOutput = $job.Error
+ WarningOutput = $job.Warning
+ VerboseOutput = $job.Verbose
+ DebugOutput = $job.Debug
+ }
+
+ If ($job.Output -eq $null -or -not $job.Output.Keys.Contains('job_output')) {
+ $ret.Output = @{failed = $true; msg = "job output was lost"}
+ }
+ Else {
+ $ret.Output = $job.Output.job_output # sub-object returned, can only be accessed as a property for some reason
+ }
+
+ Try { # this shouldn't be fatal, but can fail with both Powershell errors and COM Exceptions, hence the dual error-handling...
+ Unregister-ScheduledJob -Name $job_name -Force -ErrorAction Continue
+ }
+ Catch {
+ Write-DebugLog "Error unregistering job after execution: $($_.Exception.ToString()) $($_.ScriptStackTrace)"
+ }
+
+ return $ret
}
-$installed_prior = get-wulist -isinstalled | foreach { $_.KBArticleIDs }
-set-attr $result "updates_already_present" $installed_prior
-
-write-log "Looking for updates in '$category'"
-set-attr $result "updates_category" $category
-$to_install = get-wulist -category $category
-$installed = @()
-foreach ($u in $to_install) {
- $kb = $u.KBArticleIDs
- write-log "Installing $kb - $($u.Title)"
- $install_result = get-wuinstall -KBArticleID $u.KBArticleIDs -acceptall -ignorereboot
- Set-Attr $result "updates_installed_KB$kb" $u.Title
- $installed += $kb
+Function Log-Forensics {
+ Write-DebugLog "Arguments: $job_args | out-string"
+ Write-DebugLog "OS Version: $([environment]::OSVersion.Version | out-string)"
+ Write-DebugLog "Running as user: $([System.Security.Principal.WindowsIdentity]::GetCurrent().Name)"
+ Write-DebugLog "Powershell version: $($PSVersionTable | out-string)"
+ # FUTURE: log auth method (kerb, password, etc)
}
-write-log "Installed: $($installed.count)"
-set-attr $result "updates_installed" $installed
-set-attr $result "updates_installed_count" $installed.count
-$result.changed = $installed.count -gt 0
-
-$installed_afterwards = get-wulist -isinstalled | foreach { $_.KBArticleIDs }
-set-attr $result "updates_installed_afterwards" $installed_afterwards
-
-$reboot_needed = Get-WURebootStatus
-write-log $reboot_needed
-if ($reboot_needed -match "not") {
- write-log "Reboot not required"
-} else {
- write-log "Reboot required"
- Set-Attr $result "updates_reboot_needed" $true
- $result.changed = $true
+
+# code shared between the scheduled job and the host script
+$common_inject = {
+ # FUTURE: capture all to a list, dump on error
+ Function Write-DebugLog {
+ Param(
+ [string]$msg
+ )
+
+ $DebugPreference = "Continue"
+ $ErrorActionPreference = "Continue"
+ $date_str = Get-Date -Format u
+ $msg = "$date_str $msg"
+
+ Write-Debug $msg
+
+ if($log_path -ne $null) {
+ Add-Content $log_path $msg
+ }
+ }
}
-Set-Attr $result "updates_success" "true"
-Exit-Json $result;
+# source the common code into the current scope so we can call it
+. $common_inject
+
+$parsed_args = Parse-Args $args $true
+# grr, why use PSCustomObject for args instead of just native hashtable?
+$parsed_args.psobject.properties | foreach -begin {$job_args=@{}} -process {$job_args."$($_.Name)" = $_.Value} -end {$job_args}
+
+# set the log_path for the global log function we injected earlier
+$log_path = $job_args['log_path']
+
+Log-Forensics
+
+Write-DebugLog "Starting scheduled job with args: $($job_args | Out-String -Width 300)"
+
+# pass the common code as job_init so it'll be injected into the scheduled job script
+$sjo = RunAsScheduledJob -job_init $common_inject -job_body $job_body -job_name ansible-win-updates -job_arg_list $job_args
+
+Write-DebugLog "Scheduled job completed with output: $($sjo.Output | Out-String -Width 300)"
+
+Exit-Json $sjo.Output
\ No newline at end of file
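
Stripped of the error handling and polling, the scheduled-job machinery this rewrite is built on boils down to a register/run/wait/unregister cycle. A minimal sketch, assuming PS4+ for `RunAsTask` (the module falls back to a one-shot trigger on PS3):

```powershell
$sj = Register-ScheduledJob -Name wu-demo -ScriptBlock { whoami } -ScheduledJobOption @{ RunElevated = $true }
$sj.RunAsTask()                       # start immediately under a local, elevated token
Start-Sleep -Seconds 2                # the job object appears slightly after the task starts (the module polls instead)
Wait-Job -Name wu-demo | Receive-Job  # block for completion, then collect output
Unregister-ScheduledJob -Name wu-demo -Force
```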
diff --git a/windows/win_updates.py b/windows/win_updates.py
index 13c57f2b6d1..3fa5d0e3278 100644
--- a/windows/win_updates.py
+++ b/windows/win_updates.py
@@ -1,7 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# (c) 2014, Peter Mounce
+# (c) 2015, Matt Davis
#
# This file is part of Ansible
#
@@ -21,37 +21,128 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_updates
-version_added: "1.9"
-short_description: Lists / Installs windows updates
+version_added: "2.0"
+short_description: Download and install Windows updates
description:
- - Installs windows updates using PSWindowsUpdate (http://gallery.technet.microsoft.com/scriptcenter/2d191bcd-3308-4edd-9de2-88dff796b0bc).
- - PSWindowsUpdate needs to be installed first - use win_chocolatey.
+ - Searches, downloads, and installs Windows updates synchronously by automating the Windows Update client
options:
- category:
- description:
- - Which category to install updates from
- required: false
- default: critical
- choices:
- - critical
- - security
- - (anything that is a valid update category)
- default: critical
- aliases: []
- logPath:
- description:
- - Where to log command output to
- required: false
- default: c:\\ansible-playbook.log
- aliases: []
-author: "Peter Mounce (@petemounce)"
+ category_names:
+ description:
+ - A scalar or list of categories to install updates from
+ required: false
+ default: ["CriticalUpdates","SecurityUpdates","UpdateRollups"]
+ choices:
+ - Application
+ - Connectors
+ - CriticalUpdates
+ - DefinitionUpdates
+ - DeveloperKits
+ - FeaturePacks
+ - Guidance
+ - SecurityUpdates
+ - ServicePacks
+ - Tools
+ - UpdateRollups
+ - Updates
+ state:
+ description:
+ - Controls whether found updates are returned as a list or actually installed.
+ - This module also supports Ansible check mode, which has the same effect as setting state=searched
+ required: false
+ default: installed
+ choices:
+ - installed
+ - searched
+ log_path:
+ description:
+ - If set, win_updates will append update progress to the specified file. The directory must already exist.
+ required: false
+author: "Matt Davis (@mattdavispdx)"
+notes:
+- win_updates must be run by a user with membership in the local Administrators group
+- win_updates will use the default update service configured for the machine (Windows Update, Microsoft Update, WSUS, etc)
+- win_updates does not manage reboots, but will signal when a reboot is required with the reboot_required return value.
+- win_updates can take a significant amount of time to complete (hours, in some cases). Performance depends on many factors, including OS version, number of updates, system load, and update server load.
'''
EXAMPLES = '''
- # Install updates from security category
- win_updates:
- category: security
+# Install all security, critical, and rollup updates
+- win_updates:
+ category_names:
+ - SecurityUpdates
+ - CriticalUpdates
+ - UpdateRollups
+
+# Install only security updates
+- win_updates:
+ category_names: SecurityUpdates
+
+# Search-only, return list of found updates (if any), log to c:\ansible_wu.txt
+- win_updates:
+ category_names: SecurityUpdates
+ state: searched
+ log_path: c:\ansible_wu.txt
+'''
+
+RETURN = '''
+reboot_required:
+ description: True when the target server requires a reboot to complete updates (no further updates can be installed until after a reboot)
+ returned: success
+ type: boolean
+ sample: True
+
+updates:
+ description: List of updates that were found/installed
+ returned: success
+ type: dictionary
+ contains:
+ title:
+ description: Display name
+ returned: always
+ type: string
+ sample: "Security Update for Windows Server 2012 R2 (KB3004365)"
+ kb:
+ description: A list of KB article IDs that apply to the update
+ returned: always
+ type: list of strings
+ sample: [ '3004365' ]
+ id:
+ description: Internal Windows Update GUID
+ returned: always
+ type: string (guid)
+ sample: "fb95c1c8-de23-4089-ae29-fd3351d55421"
+ installed:
+ description: Was the update successfully installed
+ returned: always
+ type: boolean
+ sample: True
+ failure_hresult_code:
+ description: The HRESULT code from a failed update
+ returned: on install failure
+ type: int
+ sample: 2147942402
+
+found_update_count:
+ description: The number of updates found needing to be applied
+ returned: success
+ type: int
+ sample: 3
+installed_update_count:
+ description: The number of updates successfully installed
+ returned: success
+ type: int
+ sample: 2
+failed_update_count:
+ description: The number of updates that failed to install
+ returned: always
+ type: int
+ sample: 0
'''
diff --git a/windows/win_uri.ps1 b/windows/win_uri.ps1
new file mode 100644
index 00000000000..d701ef56b92
--- /dev/null
+++ b/windows/win_uri.ps1
@@ -0,0 +1,86 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Corwin Brown
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+$result = New-Object psobject @{
+ win_uri = New-Object psobject
+}
+
+# Functions ###############################################
+
+Function ConvertTo-SnakeCase($input_string) {
+ $snake_case = $input_string -csplit "(?<!^)(?=[A-Z])" -join "_"
+ return $snake_case.ToLower()
+}
diff --git a/windows/win_uri.py b/windows/win_uri.py
new file mode 100644
--- /dev/null
+++ b/windows/win_uri.py
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Corwin Brown
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: win_uri
+version_added: "2.1"
+short_description: Interacts with webservices.
+description:
+ - Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE HTTP authentication mechanisms.
+options:
+ url:
+ description:
+ - HTTP or HTTPS URL in the form of (http|https)://host.domain:port/path
+ method:
+ description:
+ - The HTTP Method of the request or response.
+ default: GET
+ choices:
+ - GET
+ - POST
+ - PUT
+ - HEAD
+ - DELETE
+ - OPTIONS
+ - PATCH
+ - TRACE
+ - CONNECT
+ - REFRESH
+ content_type:
+ description:
+ - Sets the "Content-Type" header.
+ body:
+ description:
+ - The body of the HTTP request/response to the web service.
+ headers:
+ description:
+ - 'Key Value pairs for headers. Example "Host: www.somesite.com"'
+ use_basic_parsing:
+ description:
+ - This module uses 'Invoke-WebRequest', which by default relies on the Internet Explorer engine to parse a page; in the edge case where a user has never run IE, that parsing fails. The IE parser's only advantage is letting a PowerShell script traverse the DOM, which is of no use to Ansible, so 'UseBasicParsing' is enabled by default. Set this to False to turn it off (see the final example below).
+ choices:
+ - True
+ - False
+ default: True
+author: Corwin Brown (@blakfeld)
+"""
+
+EXAMPLES = """
+# Send a GET request and store the output:
+- name: Perform a GET and Store Output
+ win_uri:
+ url: http://www.somesite.com/myendpoint
+ register: http_output
+
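+# A small follow-on sketch using the result registered above; the
+# status_code return value is documented under RETURN below
+- name: Show the status code of the previous request
+  debug:
+    msg: "GET returned HTTP {{ http_output.status_code }}"
+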
+# Set a HOST header to hit an internal webserver:
+- name: Hit a Specific Host on the Server
+ win_uri:
+ url: http://my.internal.server.com
+ method: GET
+ headers:
+ host: "www.somesite.com"
+
+# Do a HEAD request on an endpoint
+- name: Perform a HEAD on an Endpoint
+ win_uri:
+ url: http://www.somesite.com
+ method: HEAD
+
+# Post a body to an endpoint
+- name: POST a Body to an Endpoint
+ win_uri:
+ url: http://www.somesite.com
+ method: POST
+ body: '{ "some": "json" }'
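+
+# A minimal sketch (endpoint and payload are illustrative): send JSON with
+# an explicit Content-Type and turn the default basic parsing off
+- name: POST JSON and disable basic parsing
+  win_uri:
+    url: http://www.somesite.com/api
+    method: POST
+    content_type: "application/json"
+    body: '{ "some": "json" }'
+    use_basic_parsing: False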
+"""
+
+RETURN = """
+url:
+ description: The target URL.
+ returned: always
+ type: string
+ sample: "https://www.ansible.com"
+method:
+ description: The HTTP method used.
+ returned: always
+ type: string
+ sample: "GET"
+content_type:
+ description: The "content-type" header used.
+ returned: always
+ type: string
+ sample: "application/json"
+use_basic_parsing:
+ description: The state of the "use_basic_parsing" flag.
+ returned: always
+ type: bool
+ sample: True
+body:
+ description: The content of the request body sent.
+ returned: when body is specified
+ type: string
+ sample: '{"id":1}'
+ version_added: "2.3"
+status_code:
+ description: The HTTP Status Code of the response.
+ returned: success
+ type: int
+ sample: 200
+status_description:
+ description: A summary of the status.
+ returned: success
+ type: string
+ sample: "OK"
+raw_content:
+ description: The raw content of the HTTP response.
+ returned: success
+ type: string
+ sample: 'HTTP/1.1 200 OK\nX-XSS-Protection: 1; mode=block\nX-Frame-Options: SAMEORIGIN\nAlternate-Protocol: 443:quic,p=1\nAlt-Svc: quic="www.google.com:443"; ma=2592000; v="30,29,28,27,26,25",quic=":443"; ma=2...'
+headers:
+ description: The headers of the response.
+ returned: success
+ type: dict
+ sample: {"Content-Type": "application/json"}
+raw_content_length:
+ description: The byte size of the response.
+ returned: success
+ type: int
+ sample: 54447
+"""
diff --git a/windows/win_webpicmd.ps1 b/windows/win_webpicmd.ps1
index 377edcdc3c8..a8624739d7c 100644
--- a/windows/win_webpicmd.ps1
+++ b/windows/win_webpicmd.ps1
@@ -25,14 +25,7 @@ $params = Parse-Args $args;
$result = New-Object PSObject;
Set-Attr $result "changed" $false;
-If ($params.name)
-{
- $package = $params.name
-}
-Else
-{
- Fail-Json $result "missing required argument: name"
-}
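+# Get-AnsibleParam collapses the manual If/Else check into one call; with
+# -failifempty $true the module still fails when 'name' is not supplied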
+$package = Get-AnsibleParam $params -name "name" -failifempty $true
Function Find-Command
{
@@ -42,9 +35,9 @@ Function Find-Command
)
$installed = get-command $command -erroraction Ignore
write-verbose "$installed"
- if ($installed.length -gt 0)
+ if ($installed)
{
- return $installed[0]
+ return $installed
}
return $null
}
@@ -87,8 +80,12 @@ Function Test-IsInstalledFromWebPI
}
Write-Verbose "$results"
- $matches = $results | select-string -pattern "^$package\s+"
- return $matches.length -gt 0
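+    # -match against an array returns the matching elements, so the
+    # condition is true when any output line begins with the package name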
+ if ($results -match "^$package\s+")
+ {
+ return $true
+ }
+
+ return $false
}
Function Install-WithWebPICmd
@@ -112,8 +109,8 @@ Function Install-WithWebPICmd
}
write-verbose "$results"
- $success = $results | select-string -pattern "Install of Products: SUCCESS"
- if ($success.length -gt 0)
+
+ if ($results -match "Install of Products: SUCCESS")
{
$result.changed = $true
}
diff --git a/windows/win_webpicmd.py b/windows/win_webpicmd.py
index 215123cef8c..3fc9d7d4335 100644
--- a/windows/win_webpicmd.py
+++ b/windows/win_webpicmd.py
@@ -21,6 +21,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_webpicmd