diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index cc50f9530da..300886a6973 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,55 +1,5 @@
-
+# This repository is locked
-##### ISSUE TYPE
-
- - Bug Report
- - Feature Idea
- - Documentation Report
+Please open all new issues and pull requests in https://github.com/ansible/ansible
-##### COMPONENT NAME
-
-
-##### ANSIBLE VERSION
-
-```
-
-```
-
-##### CONFIGURATION
-
-
-##### OS / ENVIRONMENT
-
-
-##### SUMMARY
-
-
-##### STEPS TO REPRODUCE
-
-
-
-```
-
-```
-
-
-
-##### EXPECTED RESULTS
-
-
-##### ACTUAL RESULTS
-
-
-
-```
-
-```
+For more information please see http://docs.ansible.com/ansible/dev_guide/repomerge.html
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 5cfd027103a..300886a6973 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,28 +1,5 @@
-##### ISSUE TYPE
-
- - Feature Pull Request
- - New Module Pull Request
- - Bugfix Pull Request
- - Docs Pull Request
+# This repository is locked
-##### COMPONENT NAME
-
+Please open all new issues and pull requests in https://github.com/ansible/ansible
-##### ANSIBLE VERSION
-
-```
-
-```
-
-##### SUMMARY
-
-
-
-
-
-```
-
-```
+For more information please see http://docs.ansible.com/ansible/dev_guide/repomerge.html
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index ea3133a6097..00000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-sudo: false
-language: python
-python:
- - "2.7"
-addons:
- apt:
- sources:
- - deadsnakes
- packages:
- - python2.4
- - python2.6
- - python3.5
-before_install:
- - git config user.name "ansible"
- - git config user.email "ansible@ansible.com"
- - if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then git rebase $TRAVIS_BRANCH; fi;
-install:
- - pip install git+https://github.com/ansible/ansible.git@devel#egg=ansible
- - pip install git+https://github.com/sivel/ansible-testing.git#egg=ansible_testing
-script:
- - python2.4 -m compileall -fq -x 'cloud/' .
- - python2.4 -m compileall -fq cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py
- - python2.6 -m compileall -fq .
- - python2.7 -m compileall -fq .
- - python3.4 -m compileall -fq .
- - python3.5 -m compileall -fq .
- - ansible-validate-modules --exclude 'utilities/' .
- #- ansible-validate-modules --exclude 'cloud/amazon/ec2_lc\.py|cloud/amazon/ec2_scaling_policy\.py|cloud/amazon/ec2_scaling_policy\.py|cloud/amazon/ec2_asg\.py|cloud/azure/azure\.py|packaging/os/rhn_register\.py|network/openswitch/ops_template\.py|system/hostname\.py|utilities/' .
- #- ./test-docs.sh core
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
deleted file mode 100644
index 7ec0a622a71..00000000000
--- a/CONTRIBUTING.md
+++ /dev/null
@@ -1,32 +0,0 @@
-Welcome To Ansible GitHub
-=========================
-
-Hi! Nice to see you here!
-
-If you'd like to ask a question
-===============================
-
-Please see [this web page ](http://docs.ansible.com/community.html) for community information, which includes pointers on how to ask questions on the [mailing lists](http://docs.ansible.com/community.html#mailing-list-information) and IRC.
-
-The github issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, and that page has the pointers to those.
-
-If you'd like to contribute code
-================================
-
-Please see [this web page](http://docs.ansible.com/community.html) for information about the contribution process. Important license agreement information is also included on that page.
-
-If you'd like to file a bug
-===========================
-
-I'd also read the community page above, but in particular, make sure you copy [this issue template](https://github.com/ansible/ansible-modules-core/blob/devel/.github/ISSUE_TEMPLATE.md) into your ticket description. We have a friendly neighborhood bot that will remind you if you forget :) This template helps us organize tickets faster and prevents asking some repeated questions, so it's very helpful to us and we appreciate your help with it.
-
-Also please make sure you are testing on the latest released version of Ansible or the development branch.
-
-If you'd like to contribute code to an existing module
-======================================================
-Each module in Core is maintained by the owner of that module; each module's owner is indicated in the documentation section of the module itself. Any pull request for a module that is given a +1 by the owner in the comments will be merged by the Ansible team.
-
-Thanks!
-
-
-
diff --git a/COPYING b/COPYING
deleted file mode 100644
index 10926e87f11..00000000000
--- a/COPYING
+++ /dev/null
@@ -1,675 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-
- Copyright (C)
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see .
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- Copyright (C)
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-.
-
diff --git a/README.md b/README.md
index ebcf0c7b941..3bb1f395c56 100644
--- a/README.md
+++ b/README.md
@@ -1,30 +1,6 @@
-[](https://travis-ci.org/ansible/ansible-modules-core)
+**NOTE:** As of Ansible 2.3, modules are now in the
+[main Ansible repository](https://github.com/ansible/ansible/tree/devel/lib/ansible/modules).
-ansible-modules-core
-====================
+See the [repo merge guide](https://docs.ansible.com/ansible/dev_guide/repomerge.html) for more information.
-This repo contains Ansible's most popular modules that are shipped with Ansible.
-
-New module submissions for modules that do not yet exist should be submitted to ansible-modules-extras, rather than this repo.
-
-Take care to submit tickets to the appropriate repo where modules are contained. The docs.ansible.com website indicates this at the bottom of each module documentation page.
-
-Reporting bugs
-==============
-
-Take care to submit tickets to the appropriate repo where modules are contained. The repo is mentioned at the bottom of module documentation page at [docs.ansible.com](http://docs.ansible.com/).
-
-Testing modules
-===============
-
-Ansible [module development guide](http://docs.ansible.com/developing_modules.html#testing-modules) contains the latest info about that.
-
-License
-=======
-
-As with Ansible, modules distributed with Ansible are GPLv3 licensed. User generated modules not part of this project can be of any license.
-
-Installation
-============
-
-There should be no need to install this repo separately as it should be included in any Ansible install using the official documented methods.
+This repo still exists to allow bug fixes for `stable-2.2` and older releases.
diff --git a/VERSION b/VERSION
deleted file mode 100644
index 47c909bbc53..00000000000
--- a/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-2.0.0-0.5.beta3
diff --git a/cloud/amazon/_ec2_ami_search.py b/cloud/amazon/_ec2_ami_search.py
index 4e3189f5e70..a497ab851d9 100644
--- a/cloud/amazon/_ec2_ami_search.py
+++ b/cloud/amazon/_ec2_ami_search.py
@@ -17,6 +17,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ec2_ami_search
@@ -74,10 +78,18 @@
connection: local
tasks:
- name: Get the Ubuntu precise AMI
- ec2_ami_search: distro=ubuntu release=precise region=us-west-1 store=instance-store
+ ec2_ami_search:
+ distro: ubuntu
+ release: precise
+ region: us-west-1
+ store: instance-store
register: ubuntu_image
+
- name: Start the EC2 instance
- ec2: image={{ ubuntu_image.ami }} instance_type=m1.small key_name=mykey
+ ec2:
+ image: "{{ ubuntu_image.ami }}"
+ instance_type: m1.small
+ key_name: mykey
'''
import csv
diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py
index 615f11527c5..c05ae2df280 100644
--- a/cloud/amazon/cloudformation.py
+++ b/cloud/amazon/cloudformation.py
@@ -14,12 +14,25 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+# upcoming features:
+# - Ted's multifile YAML concatenation
+# - changesets (and blocking/waiting for them)
+# - finish AWSRetry conversion
+# - move create/update code out of main
+# - unit tests
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: cloudformation
short_description: Create or delete an AWS CloudFormation stack
description:
- Launches an AWS CloudFormation stack and waits for it complete.
+notes:
+ - As of version 2.3, migrated to boto3 to enable new features. To match existing behavior, YAML parsing is done in the module, not given to AWS as YAML. This will change (in fact, it may change before 2.3 is out).
version_added: "1.1"
options:
stack_name:
@@ -56,23 +69,16 @@
version_added: "2.0"
stack_policy:
description:
- - the path of the cloudformation stack policy
+ - the path of the cloudformation stack policy. A policy cannot be removed once placed, but it can be modified. (for instance, [allow all updates](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
required: false
default: null
version_added: "1.9"
tags:
description:
- - Dictionary of tags to associate with stack and it's resources during stack creation. Cannot be updated later.
- Requires at least Boto version 2.6.0.
+ - Dictionary of tags to associate with stack and its resources during stack creation. Can be updated later, updating tags removes previous entries.
required: false
default: null
version_added: "1.4"
- region:
- description:
- - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
- required: true
- aliases: ['aws_region', 'ec2_region']
- version_added: "1.5"
template_url:
description:
- Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region as the stack. This parameter is mutually exclusive with 'template'. Either one of them is required if "state" parameter is "present"
@@ -80,23 +86,32 @@
version_added: "2.0"
template_format:
description:
- - For local templates, allows specification of json or yaml format
+ - (deprecated) For local templates, allows specification of json or yaml format. Templates are now passed raw to CloudFormation regardless of format. This parameter is ignored since Ansible 2.3.
default: json
choices: [ json, yaml ]
required: false
version_added: "2.0"
+ role_arn:
+ description:
+ - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role docs U(http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
+ required: false
+ default: null
+ version_added: "2.3"
author: "James S. Martin (@jsmartin)"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+- aws
+- ec2
+requires: [ botocore>=1.4.57 ]
'''
EXAMPLES = '''
# Basic task example
- name: launch ansible cloudformation example
cloudformation:
- stack_name: "ansible-cloudformation"
+ stack_name: "ansible-cloudformation"
state: "present"
- region: "us-east-1"
+ region: "us-east-1"
disable_rollback: true
template: "files/cloudformation-example.json"
template_parameters:
@@ -110,9 +125,9 @@
# Basic role example
- name: launch ansible cloudformation example
cloudformation:
- stack_name: "ansible-cloudformation"
+ stack_name: "ansible-cloudformation"
state: "present"
- region: "us-east-1"
+ region: "us-east-1"
disable_rollback: true
template: "roles/cloudformation/files/cloudformation-example.json"
template_parameters:
@@ -132,9 +147,29 @@
# Use a template from a URL
- name: launch ansible cloudformation example
cloudformation:
- stack_name="ansible-cloudformation" state=present
- region=us-east-1 disable_rollback=true
- template_url=https://s3.amazonaws.com/my-bucket/cloudformation.template
+ stack_name: "ansible-cloudformation"
+ state: present
+ region: us-east-1
+ disable_rollback: true
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ args:
+ template_parameters:
+ KeyName: jmartin
+ DiskType: ephemeral
+ InstanceType: m1.small
+ ClusterSize: 3
+ tags:
+ Stack: ansible-cloudformation
+
+# Use a template from a URL, and assume a role to execute
+- name: launch ansible cloudformation example with role assumption
+ cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: present
+ region: us-east-1
+ disable_rollback: true
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
args:
template_parameters:
KeyName: jmartin
@@ -145,24 +180,59 @@
Stack: ansible-cloudformation
'''
+RETURN = '''
+events:
+ type: list
+ description: Most recent events in Cloudformation's event log. This may be from a previous run in some cases.
+ returned: always
+ sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
+log:
+ description: Debugging logs. Useful when modifying or finding an error.
+ returned: always
+ type: list
+ sample: ["updating stack"]
+stack_resources:
+ description: AWS stack resources and their status. List of dictionaries, one dict per resource.
+ type: list
+ sample: [
+ {
+ "last_updated_time": "2016-10-11T19:40:14.979000+00:00",
+ "logical_resource_id": "CFTestSg",
+ "physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
+ "resource_type": "AWS::EC2::SecurityGroup",
+ "status": "UPDATE_COMPLETE",
+ "status_reason": null
+ }
+ ]
+stack_outputs:
+ type: dict
+ description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
+ returned: always
+ sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
+'''
+
import json
import time
-import yaml
+import sys
+import traceback
+
try:
- import boto
- import boto.cloudformation.connection
- HAS_BOTO = True
+ import boto3
+ import botocore
+ HAS_BOTO3 = True
except ImportError:
- HAS_BOTO = False
+ HAS_BOTO3 = False
+# import a class, otherwise we'll use a fully qualified path
+from ansible.module_utils.ec2 import AWSRetry
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
- error = err.message
+ error = err.message + ' ' + str(err) + ' - ' + str(type(err))
else:
error = '%s: %s' % (Exception, err)
@@ -170,73 +240,112 @@ def boto_exception(err):
def boto_version_required(version_tuple):
- parts = boto.Version.split('.')
+ parts = boto3.__version__.split('.')
boto_version = []
try:
for part in parts:
boto_version.append(int(part))
- except:
+ except ValueError:
boto_version.append(-1)
return tuple(boto_version) >= tuple(version_tuple)
+def get_stack_events(cfn, stack_name):
+ '''This event data was never correct, it worked as a side effect. So the v2.3 format is different.'''
+ ret = {'events':[], 'log':[]}
+
+ try:
+ events = cfn.describe_stack_events(StackName=stack_name)
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
+ error_msg = boto_exception(err)
+ if 'does not exist' in error_msg:
+ # missing stack, don't bail.
+ ret['log'].append('Stack does not exist.')
+ return ret
+ ret['log'].append('Unknown error: ' + str(error_msg))
+ return ret
+
+ for e in events.get('StackEvents', []):
+ eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e)
+ ret['events'].append(eventline)
+
+ if e['ResourceStatus'].endswith('FAILED'):
+ failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e)
+ ret['log'].append(failline)
+
+ return ret
def stack_operation(cfn, stack_name, operation):
'''gets the status of a stack while it is created/updated/deleted'''
existed = []
- result = {}
- operation_complete = False
- while operation_complete == False:
+ while True:
try:
- stack = invoke_with_throttling_retries(cfn.describe_stacks, stack_name)[0]
+ stack = get_stack_facts(cfn, stack_name)
existed.append('yes')
except:
- if 'yes' in existed:
- result = dict(changed=True,
- output='Stack Deleted',
- events=map(str, list(stack.describe_events())))
+ # If the stack previously existed, and now can't be found then it's
+ # been deleted successfully.
+ if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
+ ret = get_stack_events(cfn, stack_name)
+ ret.update({'changed': True, 'output': 'Stack Deleted'})
+ return ret
+ else:
+ return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
+ ret = get_stack_events(cfn, stack_name)
+ if not stack:
+ if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
+ ret = get_stack_events(cfn, stack_name)
+ ret.update({'changed': True, 'output': 'Stack Deleted'})
+ return ret
else:
- result = dict(changed= True, output='Stack Not Found')
- break
- if '%s_COMPLETE' % operation == stack.stack_status:
- result = dict(changed=True,
- events = map(str, list(stack.describe_events())),
- output = 'Stack %s complete' % operation)
- break
- if 'ROLLBACK_COMPLETE' == stack.stack_status or '%s_ROLLBACK_COMPLETE' % operation == stack.stack_status:
- result = dict(changed=True, failed=True,
- events = map(str, list(stack.describe_events())),
- output = 'Problem with %s. Rollback complete' % operation)
- break
- elif '%s_FAILED' % operation == stack.stack_status:
- result = dict(changed=True, failed=True,
- events = map(str, list(stack.describe_events())),
- output = 'Stack %s failed' % operation)
- break
- elif '%s_ROLLBACK_FAILED' % operation == stack.stack_status:
- result = dict(changed=True, failed=True,
- events = map(str, list(stack.describe_events())),
- output = 'Stack %s rollback failed' % operation)
- break
+ ret.update({'changed': False, 'failed': True, 'output' : 'Stack not found.'})
+ return ret
+ elif stack['StackStatus'].endswith('_ROLLBACK_COMPLETE'):
+ ret.update({'changed': True, 'failed' :True, 'output': 'Problem with %s. Rollback complete' % operation})
+ return ret
+ # note the ordering of ROLLBACK_COMPLETE and COMPLETE, because otherwise COMPLETE will match both cases.
+ elif stack['StackStatus'].endswith('_COMPLETE'):
+ ret.update({'changed': True, 'output' : 'Stack %s complete' % operation })
+ return ret
+ elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
+ return ret
+ # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases.
+ elif stack['StackStatus'].endswith('_FAILED'):
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation})
+ return ret
else:
+ # this can loop forever :/
time.sleep(5)
- return result
+ return {'failed': True, 'output':'Failed for unknown reasons.'}
+
+@AWSRetry.backoff(tries=3, delay=5)
+def describe_stacks(cfn, stack_name):
+ return cfn.describe_stacks(StackName=stack_name)
+
+def get_stack_facts(cfn, stack_name):
+ try:
+ stack_response = describe_stacks(cfn, stack_name)
+ stack_info = stack_response['Stacks'][0]
+ #except AmazonCloudFormationException as e:
+ except (botocore.exceptions.ValidationError,botocore.exceptions.ClientError) as err:
+ error_msg = boto_exception(err)
+ if 'does not exist'.format(stack_name) in error_msg:
+ # missing stack, don't bail.
+ return None
+
+ # other error, bail.
+ raise err
+
+ if stack_response and stack_response.get('Stacks', None):
+ stacks = stack_response['Stacks']
+ if len(stacks):
+ stack_info = stacks[0]
+
+ return stack_info
-IGNORE_CODE = 'Throttling'
-MAX_RETRIES=3
-def invoke_with_throttling_retries(function_ref, *argv):
- retries=0
- while True:
- try:
- retval=function_ref(*argv)
- return retval
- except boto.exception.BotoServerError as e:
- if e.code != IGNORE_CODE or retries==MAX_RETRIES:
- raise e
- time.sleep(5 * (2**retries))
- retries += 1
def main():
- argument_spec = ec2_argument_spec()
+ argument_spec = ansible.module_utils.ec2.ec2_argument_spec()
argument_spec.update(dict(
stack_name=dict(required=True),
template_parameters=dict(required=False, type='dict', default={}),
@@ -246,7 +355,8 @@ def main():
stack_policy=dict(default=None, required=False),
disable_rollback=dict(default=False, type='bool'),
template_url=dict(default=None, required=False),
- template_format=dict(default='json', choices=['json', 'yaml'], required=False),
+ template_format=dict(default=None, choices=['json', 'yaml'], required=False),
+ role_arn=dict(default=None, required=False),
tags=dict(default=None, type='dict')
)
)
@@ -255,122 +365,105 @@ def main():
argument_spec=argument_spec,
mutually_exclusive=[['template_url', 'template']],
)
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 and botocore are required for this module')
+ # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
+ stack_params = {
+ 'Capabilities':['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
+ }
state = module.params['state']
- stack_name = module.params['stack_name']
+ stack_params['StackName'] = module.params['stack_name']
if module.params['template'] is None and module.params['template_url'] is None:
if state == 'present':
- module.fail_json('Module parameter "template" or "template_url" is required if "state" is "present"')
+ module.fail_json(msg='Module parameter "template" or "template_url" is required if "state" is "present"')
if module.params['template'] is not None:
- template_body = open(module.params['template'], 'r').read()
- else:
- template_body = None
-
- if module.params['template_format'] == 'yaml':
- if template_body is None:
- module.fail_json(msg='yaml format only supported for local templates')
- else:
- template_body = json.dumps(yaml.load(template_body), indent=2)
+ stack_params['TemplateBody'] = open(module.params['template'], 'r').read()
- notification_arns = module.params['notification_arns']
+ if module.params.get('notification_arns'):
+ stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
+ else:
+ stack_params['NotificationARNs'] = []
if module.params['stack_policy'] is not None:
- stack_policy_body = open(module.params['stack_policy'], 'r').read()
- else:
- stack_policy_body = None
+ stack_params['StackPolicyBody'] = open(module.params['stack_policy'], 'r').read()
- disable_rollback = module.params['disable_rollback']
template_parameters = module.params['template_parameters']
- tags = module.params['tags']
- template_url = module.params['template_url']
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+ stack_params['Parameters'] = [{'ParameterKey':k, 'ParameterValue':v} for k, v in template_parameters.items()]
- kwargs = dict()
- if tags is not None:
- if not boto_version_required((2,6,0)):
- module.fail_json(msg='Module parameter "tags" requires at least Boto version 2.6.0')
- kwargs['tags'] = tags
+ if isinstance(module.params.get('tags'), dict):
+ stack_params['Tags'] = ansible.module_utils.ec2.ansible_dict_to_boto3_tag_list(module.params['tags'])
+ if module.params.get('template_url'):
+ stack_params['TemplateURL'] = module.params['template_url']
- # convert the template parameters ansible passes into a tuple for boto
- template_parameters_tup = [(k, v) for k, v in template_parameters.items()]
- stack_outputs = {}
+ if module.params.get('role_arn'):
+ stack_params['RoleARN'] = module.params['role_arn']
- try:
- cfn = connect_to_aws(boto.cloudformation, region, **aws_connect_kwargs)
- except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg=str(e))
update = False
result = {}
- operation = None
+
+ try:
+ region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True)
+ cfn = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='cloudformation', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except botocore.exceptions.NoCredentialsError as e:
+ module.fail_json(msg=boto_exception(e))
+
+ stack_info = get_stack_facts(cfn, stack_params['StackName'])
# if state is present we are going to ensure that the stack is either
# created or updated
- if state == 'present':
+ if state == 'present' and not stack_info:
try:
- cfn.create_stack(stack_name, parameters=template_parameters_tup,
- template_body=template_body,
- notification_arns=notification_arns,
- stack_policy_body=stack_policy_body,
- template_url=template_url,
- disable_rollback=disable_rollback,
- capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
- **kwargs)
- operation = 'CREATE'
+ # 'disablerollback' only applies on creation, not update.
+ stack_params['DisableRollback'] = module.params['disable_rollback']
+
+ cfn.create_stack(**stack_params)
except Exception as err:
error_msg = boto_exception(err)
- if 'AlreadyExistsException' in error_msg or 'already exists' in error_msg:
- update = True
- else:
- module.fail_json(msg=error_msg)
- if not update:
- result = stack_operation(cfn, stack_name, operation)
-
- # if the state is present and the stack already exists, we try to update it
- # AWS will tell us if the stack template and parameters are the same and
- # don't need to be updated.
- if update:
+ #return {'error': error_msg}
+ module.fail_json(msg=error_msg)
+ result = stack_operation(cfn, stack_params['StackName'], 'CREATE')
+ if not result: module.fail_json(msg="empty result")
+
+ if state == 'present' and stack_info:
+ # if the state is present and the stack already exists, we try to update it.
+ # AWS will tell us if the stack template and parameters are the same and
+ # don't need to be updated.
try:
- cfn.update_stack(stack_name, parameters=template_parameters_tup,
- template_body=template_body,
- notification_arns=notification_arns,
- stack_policy_body=stack_policy_body,
- disable_rollback=disable_rollback,
- template_url=template_url,
- capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])
- operation = 'UPDATE'
+ cfn.update_stack(**stack_params)
+ result = stack_operation(cfn, stack_params['StackName'], 'UPDATE')
except Exception as err:
error_msg = boto_exception(err)
if 'No updates are to be performed.' in error_msg:
result = dict(changed=False, output='Stack is already up-to-date.')
else:
module.fail_json(msg=error_msg)
-
- if operation == 'UPDATE':
- result = stack_operation(cfn, stack_name, operation)
+ if not result: module.fail_json(msg="empty result")
# check the status of the stack while we are creating/updating it.
# and get the outputs of the stack
if state == 'present' or update:
- stack = invoke_with_throttling_retries(cfn.describe_stacks,stack_name)[0]
- for output in stack.outputs:
- stack_outputs[output.key] = output.value
- result['stack_outputs'] = stack_outputs
- stack_resources = []
- for res in cfn.list_stack_resources(stack_name):
+ stack = get_stack_facts(cfn, stack_params['StackName'])
+ if result.get('stack_outputs') is None:
+ # always define stack_outputs, but it may be empty
+ result['stack_outputs'] = {}
+ for output in stack.get('Outputs', []):
+ result['stack_outputs'][output['OutputKey']] = output['OutputValue']
+ stack_resources = []
+ reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
+ for res in reslist.get('StackResourceSummaries', []):
stack_resources.append({
- "last_updated_time": res.last_updated_time,
- "logical_resource_id": res.logical_resource_id,
- "physical_resource_id": res.physical_resource_id,
- "status": res.resource_status,
- "status_reason": res.resource_status_reason,
- "resource_type": res.resource_type })
+ "logical_resource_id": res['LogicalResourceId'],
+ "physical_resource_id": res.get('PhysicalResourceId', ''),
+ "resource_type": res['ResourceType'],
+ "last_updated_time": res['LastUpdatedTimestamp'],
+ "status": res['ResourceStatus'],
+ "status_reason": res.get('ResourceStatusReason') # can be blank, apparently
+ })
result['stack_resources'] = stack_resources
# absent state is different because of the way delete_stack works.
@@ -378,24 +471,28 @@ def main():
# so must describe the stack first
if state == 'absent':
+ #result = {}
try:
- invoke_with_throttling_retries(cfn.describe_stacks,stack_name)
- operation = 'DELETE'
- except Exception as err:
- error_msg = boto_exception(err)
- if 'Stack:%s does not exist' % stack_name in error_msg:
- result = dict(changed=False, output='Stack not found.')
+ stack = get_stack_facts(cfn, stack_params['StackName'])
+ if not stack:
+ result = {'changed': False, 'output': 'Stack not found.'}
else:
- module.fail_json(msg=error_msg)
- if operation == 'DELETE':
- cfn.delete_stack(stack_name)
- result = stack_operation(cfn, stack_name, operation)
+ cfn.delete_stack(StackName=stack_params['StackName'])
+ result = stack_operation(cfn, stack_params['StackName'], 'DELETE')
+ except Exception as err:
+ module.fail_json(msg=boto_exception(err), exception=traceback.format_exc())
+ if module.params['template_format'] is not None:
+ result['warnings'] = [('Argument `template_format` is deprecated '
+ 'since Ansible 2.3, JSON and YAML templates are now passed '
+ 'directly to the CloudFormation API.')]
module.exit_json(**result)
+
# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.ec2 import *
+from ansible.module_utils.basic import AnsibleModule
+import ansible.module_utils.ec2
+
if __name__ == '__main__':
main()
diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py
index 1d96e492461..d19f06f3695 100644
--- a/cloud/amazon/ec2.py
+++ b/cloud/amazon/ec2.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ec2
@@ -29,6 +33,13 @@
required: false
default: null
aliases: ['keypair']
+ id:
+ version_added: "1.1"
+ description:
+ - identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
+ required: false
+ default: null
+ aliases: []
group:
description:
- security group (or list of groups) to use with the instance
@@ -427,12 +438,21 @@
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
register: ec2
+
- name: Add new instance to host group
- add_host: hostname={{ item.public_ip }} groupname=launched
- with_items: '{{ec2.instances}}'
+ add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: launched
+ with_items: "{{ ec2.instances }}"
+
- name: Wait for SSH to come up
- wait_for: host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started
- with_items: '{{ec2.instances}}'
+ wait_for:
+ host: "{{ item.public_dns_name }}"
+ port: 22
+ delay: 60
+ timeout: 320
+ state: started
+ with_items: "{{ ec2.instances }}"
- name: Configure instance(s)
hosts: launched
@@ -580,6 +600,8 @@
import time
from ast import literal_eval
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six import get_function_code
try:
import boto.ec2
@@ -607,8 +629,8 @@ def find_running_instances_by_count_tag(module, ec2, count_tag, zone=None):
def _set_none_to_blank(dictionary):
result = dictionary
- for k in result.iterkeys():
- if type(result[k]) == dict:
+ for k in result:
+ if isinstance(result[k], dict):
result[k] = _set_none_to_blank(result[k])
elif not result[k]:
result[k] = ""
@@ -622,29 +644,29 @@ def get_reservations(module, ec2, tags=None, state=None, zone=None):
if tags is not None:
- if type(tags) is str:
+ if isinstance(tags, str):
try:
tags = literal_eval(tags)
except:
pass
# if string, we only care that a tag of that name exists
- if type(tags) is str:
+ if isinstance(tags, str):
filters.update({"tag-key": tags})
# if list, append each item to filters
- if type(tags) is list:
+ if isinstance(tags, list):
for x in tags:
- if type(x) is dict:
+ if isinstance(x, dict):
x = _set_none_to_blank(x)
- filters.update(dict(("tag:"+tn, tv) for (tn,tv) in x.iteritems()))
+ filters.update(dict(("tag:"+tn, tv) for (tn,tv) in iteritems(x)))
else:
filters.update({"tag-key": x})
# if dict, add the key and value to the filter
- if type(tags) is dict:
+ if isinstance(tags, dict):
tags = _set_none_to_blank(tags)
- filters.update(dict(("tag:"+tn, tv) for (tn,tv) in tags.iteritems()))
+ filters.update(dict(("tag:"+tn, tv) for (tn,tv) in iteritems(tags)))
if state:
# http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
@@ -744,7 +766,7 @@ def boto_supports_profile_name_arg(ec2):
True if Boto library accept instance_profile_name argument, else false
"""
run_instances_method = getattr(ec2, 'run_instances')
- return 'instance_profile_name' in run_instances_method.func_code.co_varnames
+ return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames
def create_block_device(module, ec2, volume):
# Not aware of a way to determine this programatically
@@ -772,7 +794,7 @@ def create_block_device(module, ec2, volume):
if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
if 'encrypted' in volume:
- module.fail_json(msg = 'You can not set encyrption when creating a volume from a snapshot')
+ module.fail_json(msg = 'You can not set encryption when creating a volume from a snapshot')
if 'ephemeral' in volume:
if 'snapshot' in volume:
module.fail_json(msg = 'Cannot set both ephemeral and snapshot')
@@ -794,7 +816,64 @@ def boto_supports_param_in_spot_request(ec2, param):
True if boto library has the named param as an argument on the request_spot_instances method, else False
"""
method = getattr(ec2, 'request_spot_instances')
- return param in method.func_code.co_varnames
+ return param in get_function_code(method).co_varnames
+
+def await_spot_requests(module, ec2, spot_requests, count):
+ """
+ Wait for a group of spot requests to be fulfilled, or fail.
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+ spot_requests: boto.ec2.spotinstancerequest.SpotInstanceRequest object returned by ec2.request_spot_instances
+ count: Total number of instances to be created by the spot requests
+
+ Returns:
+ list of instance ID's created by the spot request(s)
+ """
+ spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
+ wait_complete = time.time() + spot_wait_timeout
+
+ spot_req_inst_ids = dict()
+ while time.time() < wait_complete:
+ reqs = ec2.get_all_spot_instance_requests()
+ for sirb in spot_requests:
+ if sirb.id in spot_req_inst_ids:
+ continue
+ for sir in reqs:
+ if sir.id != sirb.id:
+ continue # this is not our spot instance
+ if sir.instance_id is not None:
+ spot_req_inst_ids[sirb.id] = sir.instance_id
+ elif sir.state == 'open':
+ continue # still waiting, nothing to do here
+ elif sir.state == 'active':
+ continue # Instance is created already, nothing to do here
+ elif sir.state == 'failed':
+ module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
+ sir.id, sir.status.code, sir.fault.code, sir.fault.message))
+ elif sir.state == 'cancelled':
+ module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
+ elif sir.state == 'closed':
+ # instance is terminating or marked for termination
+ # this may be intentional on the part of the operator,
+ # or it may have been terminated by AWS due to capacity,
+                        # price, or group constraints. In this case, we'll fail
+ # the module if the reason for the state is anything
+ # other than termination by user. Codes are documented at
+ # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
+ if sir.status.code == 'instance-terminated-by-user':
+ # do nothing, since the user likely did this on purpose
+ pass
+ else:
+ spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
+ module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))
+
+ if len(spot_req_inst_ids) < count:
+ time.sleep(5)
+ else:
+ return spot_req_inst_ids.values()
+ module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime())
+
def enforce_count(module, ec2, vpc):
@@ -845,7 +924,7 @@ def enforce_count(module, ec2, vpc):
# ensure all instances are dictionaries
all_instances = []
for inst in instances:
- if type(inst) is not dict:
+ if not isinstance(inst, dict):
inst = get_instance_info(inst)
all_instances.append(inst)
@@ -969,7 +1048,7 @@ def create_instances(module, ec2, vpc, override_count=None):
if ebs_optimized:
params['ebs_optimized'] = ebs_optimized
- # 'tenancy' always has a default value, but it is not a valid parameter for spot instance resquest
+ # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
if not spot_price:
params['tenancy'] = tenancy
@@ -1051,8 +1130,9 @@ def create_instances(module, ec2, vpc, override_count=None):
private_ip_address = private_ip,
))
- # Spot instances do not support start/stop thereby not having the option to change shutdown behavior
- params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior
+ # For ordinary (not spot) instances, we can select 'stop'
+ # (the default) or 'terminate' here.
+ params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'
res = ec2.run_instances(**params)
instids = [ i.id for i in res.instances ]
@@ -1088,11 +1168,11 @@ def create_instances(module, ec2, vpc, override_count=None):
module.fail_json(
msg="placement_group parameter requires Boto version 2.3.0 or higher.")
- if boto_supports_param_in_spot_request(ec2, 'instance_initiated_shutdown_behavior'):
- params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior
- elif instance_initiated_shutdown_behavior:
+ # You can't tell spot instances to 'stop'; they will always be
+ # 'terminate'd. For convenience, we'll ignore the latter value.
+ if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
module.fail_json(
- msg="instance_initiated_shutdown_behavior parameter is not supported by your Boto version.")
+ msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")
if spot_launch_group and isinstance(spot_launch_group, basestring):
params['launch_group'] = spot_launch_group
@@ -1105,23 +1185,7 @@ def create_instances(module, ec2, vpc, override_count=None):
# Now we have to do the intermediate waiting
if wait:
- spot_req_inst_ids = dict()
- spot_wait_timeout = time.time() + spot_wait_timeout
- while spot_wait_timeout > time.time():
- reqs = ec2.get_all_spot_instance_requests()
- for sirb in res:
- if sirb.id in spot_req_inst_ids:
- continue
- for sir in reqs:
- if sir.id == sirb.id and sir.instance_id is not None:
- spot_req_inst_ids[sirb.id] = sir.instance_id
- if len(spot_req_inst_ids) < count:
- time.sleep(5)
- else:
- break
- if spot_wait_timeout <= time.time():
- module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime())
- instids = spot_req_inst_ids.values()
+ instids = await_spot_requests(module, ec2, res, count)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message))
@@ -1235,7 +1299,7 @@ def terminate_instances(module, ec2, instance_ids):
instance_ids=terminated_instance_ids, \
filters={'instance-state-name':'terminated'})
try:
- num_terminated = len(response.pop().instances)
+ num_terminated = sum([len(res.instances) for res in response])
except Exception as e:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
@@ -1304,17 +1368,31 @@ def startstop_instances(module, ec2, instance_ids, state, instance_tags):
# Check that our instances are not in the state we want to take
# Check (and eventually change) instances attributes and instances state
- running_instances_array = []
+ existing_instances_array = []
for res in ec2.get_all_instances(instance_ids, filters=filters):
for inst in res.instances:
# Check "source_dest_check" attribute
- if inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
- inst.modify_attribute('sourceDestCheck', source_dest_check)
- changed = True
+ try:
+ if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
+ inst.modify_attribute('sourceDestCheck', source_dest_check)
+ changed = True
+ except boto.exception.EC2ResponseError as exc:
+ # instances with more than one Elastic Network Interface will
+ # fail, because they have the sourceDestCheck attribute defined
+ # per-interface
+ if exc.code == 'InvalidInstanceID':
+ for interface in inst.interfaces:
+ if interface.source_dest_check != source_dest_check:
+ ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
+ changed = True
+ else:
+ module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
+ exception=traceback.format_exc(exc))
# Check "termination_protection" attribute
- if inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection:
+ if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection
+ and termination_protection is not None):
inst.modify_attribute('disableApiTermination', termination_protection)
changed = True
@@ -1329,7 +1407,9 @@ def startstop_instances(module, ec2, instance_ids, state, instance_tags):
except EC2ResponseError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
changed = True
+ existing_instances_array.append(inst.id)
+ instance_ids = list(set(existing_instances_array + (instance_ids or [])))
## Wait for all the instances to finish starting or stopping
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time():
@@ -1379,8 +1459,6 @@ def restart_instances(module, ec2, instance_ids, state, instance_tags):
termination_protection = module.params.get('termination_protection')
changed = False
instance_dict_array = []
- source_dest_check = module.params.get('source_dest_check')
- termination_protection = module.params.get('termination_protection')
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
# Fail unless the user defined instance tags
@@ -1398,17 +1476,30 @@ def restart_instances(module, ec2, instance_ids, state, instance_tags):
# Check that our instances are not in the state we want to take
# Check (and eventually change) instances attributes and instances state
- running_instances_array = []
for res in ec2.get_all_instances(instance_ids, filters=filters):
for inst in res.instances:
# Check "source_dest_check" attribute
- if inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
- inst.modify_attribute('sourceDestCheck', source_dest_check)
- changed = True
+ try:
+ if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
+ inst.modify_attribute('sourceDestCheck', source_dest_check)
+ changed = True
+ except boto.exception.EC2ResponseError as exc:
+ # instances with more than one Elastic Network Interface will
+ # fail, because they have the sourceDestCheck attribute defined
+ # per-interface
+ if exc.code == 'InvalidInstanceID':
+ for interface in inst.interfaces:
+ if interface.source_dest_check != source_dest_check:
+ ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
+ changed = True
+ else:
+ module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
+ exception=traceback.format_exc(exc))
# Check "termination_protection" attribute
- if inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection:
+ if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection
+ and termination_protection is not None):
inst.modify_attribute('disableApiTermination', termination_protection)
changed = True
@@ -1453,9 +1544,9 @@ def main():
instance_profile_name = dict(),
instance_ids = dict(type='list', aliases=['instance_id']),
source_dest_check = dict(type='bool', default=True),
- termination_protection = dict(type='bool', default=False),
+ termination_protection = dict(type='bool', default=None),
state = dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
- instance_initiated_shutdown_behavior=dict(default='stop', choices=['stop', 'terminate']),
+ instance_initiated_shutdown_behavior=dict(default=None, choices=['stop', 'terminate']),
exact_count = dict(type='int', default=None),
count_tag = dict(),
volumes = dict(type='list'),
@@ -1537,4 +1628,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py
index 1c1bd8354b1..de3a31c4a76 100644
--- a/cloud/amazon/ec2_ami.py
+++ b/cloud/amazon/ec2_ami.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ec2_ami
@@ -247,7 +251,7 @@
type: string
sample: "435210894375"
platform:
- description: plaform of image
+ description: platform of image
returned: when AMI is created or already exists
type: string
sample: null
@@ -395,6 +399,8 @@ def create_image(module, ec2):
if img.state == 'available':
break
+ elif img.state == 'failed':
+ module.fail_json(msg="AMI creation failed, please see the AWS console for more details")
except boto.exception.EC2ResponseError as e:
if ('InvalidAMIID.NotFound' not in e.error_code and 'InvalidAMIID.Unavailable' not in e.error_code) and wait and i == wait_timeout - 1:
module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help. %s: %s" % (e.error_code, e.error_message))
diff --git a/cloud/amazon/ec2_ami_find.py b/cloud/amazon/ec2_ami_find.py
index bc7ad944aa5..c6b986bdd80 100644
--- a/cloud/amazon/ec2_ami_find.py
+++ b/cloud/amazon/ec2_ami_find.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ec2_ami_find
@@ -228,7 +232,7 @@
type: string
sample: "435210894375"
platform:
- description: plaform of image
+ description: platform of image
returned: when AMI found
type: string
sample: null
diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py
index 66261fdbbd7..be7c634d742 100644
--- a/cloud/amazon/ec2_asg.py
+++ b/cloud/amazon/ec2_asg.py
@@ -13,6 +13,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: ec2_asg
@@ -53,6 +57,12 @@
description:
- Maximum number of instances in group, if unspecified then the current group value will be used.
required: false
+ placement_group:
+ description:
+ - Physical location of your cluster placement group created in Amazon EC2.
+ required: false
+ version_added: "2.3"
+ default: None
desired_capacity:
description:
- Desired number of instances in group, if unspecified then the current group value will be used.
@@ -77,7 +87,7 @@
default: None
lc_check:
description:
- - Check to make sure instances that are being replaced with replace_instances do not aready have the current launch_config.
+ - Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config.
required: false
version_added: "1.8"
default: True
@@ -113,7 +123,7 @@
version_added: "2.0"
wait_timeout:
description:
- - how long before wait instances to become viable when replaced. Used in concjunction with instance_ids option.
+ - how long before wait instances to become viable when replaced. Used in conjunction with instance_ids option.
default: 300
version_added: "1.8"
wait_for_instances:
@@ -125,11 +135,30 @@
termination_policies:
description:
- An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
- - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existent autoscaling group, the current termination policies are mantained
+      - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the current termination policies are maintained.
required: false
default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
version_added: "2.0"
+ notification_topic:
+ description:
+ - A SNS topic ARN to send auto scaling notifications to.
+ default: None
+ required: false
+ version_added: "2.2"
+ notification_types:
+ description:
+ - A list of auto scaling events to trigger notifications on.
+ default: ['autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR']
+ required: false
+ version_added: "2.2"
+ suspend_processes:
+ description:
+ - A list of scaling processes to suspend.
+ required: False
+ default: []
+ choices: ['Launch', 'Terminate', 'HealthCheck', 'ReplaceUnhealthy', 'AZRebalance', 'AlarmNotification', 'ScheduledActions', 'AddToLoadBalancer']
+ version_added: "2.3"
extends_documentation_fragment:
- aws
- ec2
@@ -203,6 +232,7 @@
import time
import logging as log
+import traceback
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
@@ -319,7 +349,7 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
def elb_healthy(asg_connection, elb_connection, module, group_name):
- healthy_instances = []
+ healthy_instances = set()
as_group = asg_connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
# get healthy, inservice instances from ASG
@@ -342,7 +372,7 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
for i in lb_instances:
if i.state == "InService":
- healthy_instances.append(i.instance_id)
+ healthy_instances.add(i.instance_id)
log.debug("{0}: {1}".format(i.instance_id, i.state))
return len(healthy_instances)
@@ -356,7 +386,7 @@ def wait_for_elb(asg_connection, module, group_name):
as_group = asg_connection.get_all_groups(names=[group_name])[0]
if as_group.load_balancers and as_group.health_check_type == 'ELB':
- log.debug("Waiting for ELB to consider intances healthy.")
+ log.debug("Waiting for ELB to consider instances healthy.")
try:
elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound as e:
@@ -374,6 +404,28 @@ def wait_for_elb(asg_connection, module, group_name):
module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime())
log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
+
+def suspend_processes(as_group, module):
+ suspend_processes = set(module.params.get('suspend_processes'))
+
+ try:
+ suspended_processes = set([p.process_name for p in as_group.suspended_processes])
+ except AttributeError:
+ # New ASG being created, no suspended_processes defined yet
+ suspended_processes = set()
+
+ if suspend_processes == suspended_processes:
+ return False
+
+ resume_processes = list(suspended_processes - suspend_processes)
+ if resume_processes:
+ as_group.resume_processes(resume_processes)
+
+ if suspend_processes:
+ as_group.suspend_processes(list(suspend_processes))
+
+ return True
+
def create_autoscaling_group(connection, module):
group_name = module.params.get('name')
load_balancers = module.params['load_balancers']
@@ -381,6 +433,7 @@ def create_autoscaling_group(connection, module):
launch_config_name = module.params.get('launch_config_name')
min_size = module.params['min_size']
max_size = module.params['max_size']
+ placement_group = module.params.get('placement_group')
desired_capacity = module.params.get('desired_capacity')
vpc_zone_identifier = module.params.get('vpc_zone_identifier')
set_tags = module.params.get('tags')
@@ -391,6 +444,8 @@ def create_autoscaling_group(connection, module):
as_groups = connection.get_all_groups(names=[group_name])
wait_timeout = module.params.get('wait_timeout')
termination_policies = module.params.get('termination_policies')
+ notification_topic = module.params.get('notification_topic')
+ notification_types = module.params.get('notification_types')
if not vpc_zone_identifier and not availability_zones:
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
@@ -415,6 +470,8 @@ def create_autoscaling_group(connection, module):
availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()]
enforce_required_arguments(module)
launch_configs = connection.get_all_launch_configurations(names=[launch_config_name])
+ if len(launch_configs) == 0:
+ module.fail_json(msg="No launch config found with name %s" % launch_config_name)
ag = AutoScalingGroup(
group_name=group_name,
load_balancers=load_balancers,
@@ -422,6 +479,7 @@ def create_autoscaling_group(connection, module):
launch_config=launch_configs[0],
min_size=min_size,
max_size=max_size,
+ placement_group=placement_group,
desired_capacity=desired_capacity,
vpc_zone_identifier=vpc_zone_identifier,
connection=connection,
@@ -433,18 +491,27 @@ def create_autoscaling_group(connection, module):
try:
connection.create_auto_scaling_group(ag)
+ suspend_processes(ag, module)
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
+
+ if notification_topic:
+ ag.put_notification_configuration(notification_topic, notification_types)
+
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
changed = True
return(changed, asg_properties)
except BotoServerError as e:
- module.fail_json(msg=str(e))
+ module.fail_json(msg="Failed to create Autoscaling Group: %s" % str(e), exception=traceback.format_exc(e))
else:
as_group = as_groups[0]
changed = False
+
+ if suspend_processes(as_group, module):
+ changed = True
+
for attr in ASG_ATTRIBUTES:
if module.params.get(attr, None) is not None:
module_attr = module.params.get(attr)
@@ -453,14 +520,15 @@ def create_autoscaling_group(connection, module):
group_attr = getattr(as_group, attr)
# we do this because AWS and the module may return the same list
# sorted differently
- try:
- module_attr.sort()
- except:
- pass
- try:
- group_attr.sort()
- except:
- pass
+ if attr != 'termination_policies':
+ try:
+ module_attr.sort()
+ except:
+ pass
+ try:
+ group_attr.sort()
+ except:
+ pass
if group_attr != module_attr:
changed = True
setattr(as_group, attr, module_attr)
@@ -496,7 +564,13 @@ def create_autoscaling_group(connection, module):
try:
as_group.update()
except BotoServerError as e:
- module.fail_json(msg=str(e))
+ module.fail_json(msg="Failed to update Autoscaling Group: %s" % str(e), exception=traceback.format_exc(e))
+
+ if notification_topic:
+ try:
+ as_group.put_notification_configuration(notification_topic, notification_types)
+ except BotoServerError as e:
+ module.fail_json(msg="Failed to update Autoscaling Group notifications: %s" % str(e), exception=traceback.format_exc(e))
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
@@ -505,12 +579,17 @@ def create_autoscaling_group(connection, module):
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
except BotoServerError as e:
- module.fail_json(msg=str(e))
+ module.fail_json(msg="Failed to read existing Autoscaling Groups: %s" % str(e), exception=traceback.format_exc(e))
return(changed, asg_properties)
def delete_autoscaling_group(connection, module):
group_name = module.params.get('name')
+ notification_topic = module.params.get('notification_topic')
+
+ if notification_topic:
+ ag.delete_notification_configuration(notification_topic)
+
groups = connection.get_all_groups(names=[group_name])
if groups:
group = groups[0]
@@ -565,6 +644,14 @@ def replace(connection, module):
instances = props['instances']
if replace_instances:
instances = replace_instances
+
+ #check if min_size/max_size/desired capacity have been specified and if not use ASG values
+ if min_size is None:
+ min_size = as_group.min_size
+ if max_size is None:
+ max_size = as_group.max_size
+ if desired_capacity is None:
+ desired_capacity = as_group.desired_capacity
# check to see if instances are replaceable if checking launch configs
new_instances, old_instances = get_instances_by_lc(props, lc_check, instances)
@@ -587,16 +674,9 @@ def replace(connection, module):
if not old_instances:
changed = False
return(changed, props)
-
- #check if min_size/max_size/desired capacity have been specified and if not use ASG values
- if min_size is None:
- min_size = as_group.min_size
- if max_size is None:
- max_size = as_group.max_size
- if desired_capacity is None:
- desired_capacity = as_group.desired_capacity
+
# set temporary settings and wait for them to be reached
- # This should get overriden if the number of instances left is less than the batch size.
+ # This should get overwritten if the number of instances left is less than the batch size.
as_group = connection.get_all_groups(names=[group_name])[0]
update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
@@ -747,7 +827,7 @@ def wait_for_term_inst(connection, module, term_instances):
lifecycle = instance_facts[i]['lifecycle_state']
health = instance_facts[i]['health_status']
log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health ))
- if lifecycle == 'Terminating' or healthy == 'Unhealthy':
+ if lifecycle == 'Terminating' or health == 'Unhealthy':
count += 1
time.sleep(10)
@@ -785,6 +865,7 @@ def main():
launch_config_name=dict(type='str'),
min_size=dict(type='int'),
max_size=dict(type='int'),
+ placement_group=dict(type='str'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1),
@@ -798,7 +879,15 @@ def main():
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
default_cooldown=dict(type='int', default=300),
wait_for_instances=dict(type='bool', default=True),
- termination_policies=dict(type='list', default='Default')
+ termination_policies=dict(type='list', default='Default'),
+ notification_topic=dict(type='str', default=None),
+ notification_types=dict(type='list', default=[
+ 'autoscaling:EC2_INSTANCE_LAUNCH',
+ 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
+ 'autoscaling:EC2_INSTANCE_TERMINATE',
+ 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
+ ]),
+ suspend_processes=dict(type='list', default=[])
),
)
diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py
index be5691d07d1..22d950f9fb6 100644
--- a/cloud/amazon/ec2_eip.py
+++ b/cloud/amazon/ec2_eip.py
@@ -14,12 +14,17 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ec2_eip
-short_description: associate an EC2 elastic IP with an instance.
+short_description: manages EC2 elastic IP (EIP) addresses.
description:
- - This module associates AWS EC2 elastic IP addresses with instances
+ - This module can allocate or release an EIP.
+ - This module can associate/disassociate an EIP with instances or network interfaces.
version_added: "1.4"
options:
device_id:
@@ -30,13 +35,15 @@
version_added: "2.0"
public_ip:
description:
- - The elastic IP address to associate with the instance.
- - If absent, allocate a new address
+ - The IP address of a previously allocated EIP.
+ - If present and device is specified, the EIP is associated with the device.
+ - If absent and device is specified, the EIP is disassociated from the device.
required: false
+ aliases: [ ip ]
state:
description:
- - If present, associate the IP with the instance.
- - If absent, disassociate the IP with the instance.
+ - If present, allocate an EIP or associate an existing EIP with a device.
+ - If absent, disassociate the EIP from the device and optionally release it.
required: false
choices: ['present', 'absent']
default: present
@@ -48,7 +55,7 @@
version_added: "1.4"
reuse_existing_ip_allowed:
description:
- - Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one.
+ - Reuse an EIP that is not associated to a device (when available), instead of allocating a new one.
required: false
default: false
version_added: "1.6"
@@ -64,8 +71,8 @@
author: "Rick Mendes (@rickmendes) "
notes:
- This module will return C(public_ip) on success, which will contain the
- public IP address associated with the instance.
- - There may be a delay between the time the Elastic IP is assigned and when
+ public IP address associated with the device.
+ - There may be a delay between the time the EIP is assigned and when
the cloud instance is reachable via the new address. Use wait_for and
pause to delay further playbook execution until the instance is reachable,
if necessary.
@@ -76,34 +83,67 @@
EXAMPLES = '''
- name: associate an elastic IP with an instance
- ec2_eip: device_id=i-1212f003 ip=93.184.216.119
+ ec2_eip:
+ device_id: i-1212f003
+ ip: 93.184.216.119
+
- name: associate an elastic IP with a device
- ec2_eip: device_id=eni-c8ad70f3 ip=93.184.216.119
+ ec2_eip:
+ device_id: eni-c8ad70f3
+ ip: 93.184.216.119
+
- name: disassociate an elastic IP from an instance
- ec2_eip: device_id=i-1212f003 ip=93.184.216.119 state=absent
+ ec2_eip:
+ device_id: i-1212f003
+ ip: 93.184.216.119
+ state: absent
+
- name: disassociate an elastic IP with a device
- ec2_eip: device_id=eni-c8ad70f3 ip=93.184.216.119 state=absent
+ ec2_eip:
+ device_id: eni-c8ad70f3
+ ip: 93.184.216.119
+ state: absent
+
- name: allocate a new elastic IP and associate it with an instance
- ec2_eip: device_id=i-1212f003
+ ec2_eip:
+ device_id: i-1212f003
+
- name: allocate a new elastic IP without associating it to anything
action: ec2_eip
register: eip
+
- name: output the IP
- debug: msg="Allocated IP is {{ eip.public_ip }}"
+ debug:
+ msg: "Allocated IP is {{ eip.public_ip }}"
+
- name: another way of allocating an elastic IP without associating it to anything
- ec2_eip: state='present'
+ ec2_eip:
+ state: 'present'
+
- name: provision new instances with ec2
- ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes'''
-''' group=webserver count=3
+ ec2:
+ keypair: mykey
+ instance_type: c1.medium
+ image: ami-40603AD1
+ wait: yes
+ group: webserver
+ count: 3
register: ec2
+
- name: associate new elastic IPs with each of the instances
- ec2_eip: "device_id={{ item }}"
- with_items: ec2.instance_ids
+ ec2_eip:
+ device_id: "{{ item }}"
+ with_items: "{{ ec2.instance_ids }}"
+
- name: allocate a new elastic IP inside a VPC in us-west-2
- ec2_eip: region=us-west-2 in_vpc=yes
+ ec2_eip:
+ region: us-west-2
+ in_vpc: yes
register: eip
+
- name: output the IP
- debug: msg="Allocated IP inside a VPC is {{ eip.public_ip }}"
+ debug:
+ msg: "Allocated IP inside a VPC is {{ eip.public_ip }}"
'''
try:
@@ -340,6 +380,8 @@ def main():
if device_id and device_id.startswith('i-'):
is_instance = True
elif device_id:
+ if device_id.startswith('eni-') and not in_vpc:
+ module.fail_json(msg="If you are specifying an ENI, in_vpc must be true")
is_instance = False
try:
diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py
index 7e741117745..cd2cf5fbae6 100644
--- a/cloud/amazon/ec2_elb.py
+++ b/cloud/amazon/ec2_elb.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: ec2_elb
@@ -92,7 +96,7 @@
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
state: present
- with_items: ec2_elbs
+ with_items: "{{ ec2_elbs }}"
"""
import time
@@ -204,7 +208,7 @@ def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
self.changed = True
break
elif self._is_instance_state_pending(instance_state):
- # If it's pending, we'll skip further checks andd continue waiting
+ # If it's pending, we'll skip further checks and continue waiting
pass
elif (awaited_state == 'InService'
and instance_state.reason_code == "Instance"
@@ -212,7 +216,7 @@ def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
# If the reason_code for the instance being out of service is
# "Instance" this indicates a failure state, e.g. the instance
# has failed a health check or the ELB does not have the
- # instance's availabilty zone enabled. The exact reason why is
+ # instance's availability zone enabled. The exact reason why is
# described in InstantState.description.
msg = ("The instance %s could not be put in service on %s."
" Reason: %s")
@@ -327,7 +331,7 @@ def main():
ec2_elbs={'default': None, 'required': False, 'type':'list'},
enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
wait={'required': False, 'default': True, 'type': 'bool'},
- wait_timeout={'requred': False, 'default': 0, 'type': 'int'}
+ wait_timeout={'required': False, 'default': 0, 'type': 'int'}
)
)
diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py
index 7311a32a8fe..ca87a1cb3ad 100644
--- a/cloud/amazon/ec2_elb_lb.py
+++ b/cloud/amazon/ec2_elb_lb.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: ec2_elb_lb
@@ -135,7 +139,7 @@
version_added: "1.8"
stickiness:
description:
- - An associative array of stickness policy settings. Policy will be applied to all listeners ( see example )
+ - An associative array of stickiness policy settings. Policy will be applied to all listeners ( see example )
required: false
version_added: "2.0"
wait:
@@ -313,10 +317,10 @@
- us-east-1d
listeners:
- protocol: http
- - load_balancer_port: 80
- - instance_port: 80
+ load_balancer_port: 80
+ instance_port: 80
-# Create an ELB with load balanacer stickiness enabled
+# Create an ELB with load balancer stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
@@ -327,8 +331,8 @@
- us-east-1d
listeners:
- protocol: http
- - load_balancer_port: 80
- - instance_port: 80
+ load_balancer_port: 80
+ instance_port: 80
stickiness:
type: loadbalancer
enabled: yes
@@ -345,8 +349,8 @@
- us-east-1d
listeners:
- protocol: http
- - load_balancer_port: 80
- - instance_port: 80
+ load_balancer_port: 80
+ instance_port: 80
stickiness:
type: application
enabled: yes
@@ -363,8 +367,8 @@
- us-east-1d
listeners:
- protocol: http
- - load_balancer_port: 80
- - instance_port: 80
+ load_balancer_port: 80
+ instance_port: 80
tags:
Name: "New ELB"
stack: "production"
@@ -381,8 +385,8 @@
- us-east-1d
listeners:
- protocol: http
- - load_balancer_port: 80
- - instance_port: 80
+ load_balancer_port: 80
+ instance_port: 80
tags: {}
"""
diff --git a/cloud/amazon/ec2_facts.py b/cloud/amazon/ec2_facts.py
index bcda99f16b3..498cf9c2dfe 100644
--- a/cloud/amazon/ec2_facts.py
+++ b/cloud/amazon/ec2_facts.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ec2_facts
@@ -42,10 +46,11 @@
EXAMPLES = '''
# Conditional example
- name: Gather facts
- action: ec2_facts
+ ec2_facts:
- name: Conditional
- action: debug msg="This instance is a t1.micro"
+ debug:
+ msg: "This instance is a t1.micro"
when: ansible_ec2_instance_type == "t1.micro"
'''
@@ -182,4 +187,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py
index 4e05b7219b4..b381218f496 100644
--- a/cloud/amazon/ec2_group.py
+++ b/cloud/amazon/ec2_group.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ec2_group
@@ -163,6 +167,10 @@ def validate_rule(module, rule):
VALID_PARAMS = ('cidr_ip',
'group_id', 'group_name', 'group_desc',
'proto', 'from_port', 'to_port')
+
+ if not isinstance(rule, dict):
+ module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
+
for k in rule:
if k not in VALID_PARAMS:
module.fail_json(msg='Invalid rule parameter \'{}\''.format(k))
@@ -471,4 +479,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/ec2_key.py b/cloud/amazon/ec2_key.py
index d0f361e3944..69d96fed977 100644
--- a/cloud/amazon/ec2_key.py
+++ b/cloud/amazon/ec2_key.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ec2_key
@@ -241,4 +245,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py
index d36bb15ecd6..7a8754b8994 100644
--- a/cloud/amazon/ec2_lc.py
+++ b/cloud/amazon/ec2_lc.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: ec2_lc
@@ -308,4 +312,5 @@ def main():
elif state == 'absent':
delete_launch_config(connection, module)
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py
index d581b54f978..984211bc277 100644
--- a/cloud/amazon/ec2_metric_alarm.py
+++ b/cloud/amazon/ec2_metric_alarm.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
diff --git a/cloud/amazon/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py
index 5ca8eafcc75..bea3bfbca8a 100644
--- a/cloud/amazon/ec2_scaling_policy.py
+++ b/cloud/amazon/ec2_scaling_policy.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = """
module: ec2_scaling_policy
short_description: Create or delete AWS scaling policies for Autoscaling groups
diff --git a/cloud/amazon/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py
index 0fa98a10d7a..b962e187608 100644
--- a/cloud/amazon/ec2_snapshot.py
+++ b/cloud/amazon/ec2_snapshot.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ec2_snapshot
diff --git a/cloud/amazon/ec2_tag.py b/cloud/amazon/ec2_tag.py
index 3fd05dd4481..0fe20e1786d 100644
--- a/cloud/amazon/ec2_tag.py
+++ b/cloud/amazon/ec2_tag.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ec2_tag
@@ -89,7 +93,7 @@
instance: "{{ item.id }}"
region: eu-west-1
state: list
- with_items: ec2.tagged_instances
+ with_items: "{{ ec2.tagged_instances }}"
register: ec2_vol
- name: tag the volumes
diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py
index 5d325d83fa3..cd76703f432 100644
--- a/cloud/amazon/ec2_vol.py
+++ b/cloud/amazon/ec2_vol.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ec2_vol
@@ -135,7 +139,7 @@
- ec2_vol:
instance: "{{ item.id }} "
volume_size: 5
- with_items: ec2.instances
+ with_items: "{{ ec2.instances }}"
register: ec2_vol
# Example: Launch an instance and then add a volume if not already attached
@@ -156,7 +160,7 @@
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
- with_items: ec2.instances
+ with_items: "{{ ec2.instances }}"
register: ec2_vol
# Remove a volume
diff --git a/cloud/amazon/ec2_vpc.py b/cloud/amazon/ec2_vpc.py
index 85efff50eb7..5b0cfc51b0e 100644
--- a/cloud/amazon/ec2_vpc.py
+++ b/cloud/amazon/ec2_vpc.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ec2_vpc
@@ -270,7 +274,7 @@ def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=Non
Checks if the remote routes match the local routes.
route_tables : Route_tables parameter in the module
- vpc_conn : The VPC conection object
+ vpc_conn : The VPC connection object
module : The module object
vpc : The vpc object for this route table
igw : The internet gateway object for this vpc
@@ -408,7 +412,7 @@ def create_vpc(module, vpc_conn):
for subnet in subnets:
add_subnet = True
subnet_tags_current = True
- new_subnet_tags = subnet.get('resource_tags', None)
+ new_subnet_tags = subnet.get('resource_tags', {})
subnet_tags_delete = []
for csn in current_subnets:
@@ -444,7 +448,7 @@ def create_vpc(module, vpc_conn):
if add_subnet:
try:
new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None))
- new_subnet_tags = subnet.get('resource_tags', None)
+ new_subnet_tags = subnet.get('resource_tags', {})
if new_subnet_tags:
# Sometimes AWS takes its time to create a subnet and so using new subnets's id
# to create tags results in exception.
@@ -503,7 +507,7 @@ def create_vpc(module, vpc_conn):
# Handle route tables - this may be worth splitting into a
# different module but should work fine here. The strategy to stay
- # indempotent is to basically build all the route tables as
+ # idempotent is to basically build all the route tables as
# defined, track the route table ids, and then run through the
# remote list of route tables and delete any that we didn't
# create. This shouldn't interrupt traffic in theory, but is the
@@ -742,4 +746,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py
index 54c4307b23f..7b7e0e46477 100644
--- a/cloud/amazon/ec2_vpc_net.py
+++ b/cloud/amazon/ec2_vpc_net.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ec2_vpc_net
@@ -25,7 +29,7 @@
options:
name:
description:
- - The name to give your VPC. This is used in combination with the cidr_block paramater to determine if a VPC already exists.
+ - The name to give your VPC. This is used in combination with the cidr_block parameter to determine if a VPC already exists.
required: yes
cidr_block:
description:
diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py
index 5a3a60c005f..00098b171ef 100644
--- a/cloud/amazon/elasticache.py
+++ b/cloud/amazon/elasticache.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: elasticache
@@ -554,4 +558,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/elasticache_subnet_group.py b/cloud/amazon/elasticache_subnet_group.py
index f2aa2e25b9c..1e5708c03e3 100644
--- a/cloud/amazon/elasticache_subnet_group.py
+++ b/cloud/amazon/elasticache_subnet_group.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: elasticache_subnet_group
@@ -151,4 +155,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py
index 73fd6101576..79a3e346221 100644
--- a/cloud/amazon/iam.py
+++ b/cloud/amazon/iam.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: iam
@@ -78,7 +82,7 @@
default: '1'
access_key_ids:
description:
- - A list of the keys that you want impacted by the access_key_state paramter.
+ - A list of the keys that you want impacted by the access_key_state parameter.
groups:
description:
- A list of groups the user should belong to. When update, will gracefully remove groups not listed.
@@ -136,7 +140,7 @@
name: jdavila
state: update
groups: "{{ item.created_group.group_name }}"
- with_items: new_groups.results
+ with_items: "{{ new_groups.results }}"
# Example of role with custom trust policy for Lambda service
- name: Create IAM role with custom trust relationship
@@ -334,7 +338,7 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key
except boto.exception.BotoServerError as err:
error_msg = boto_exception(str(err))
if 'Password does not conform to the account password policy' in error_msg:
- module.fail_json(changed=False, msg="Passsword doesn't conform to policy")
+ module.fail_json(changed=False, msg="Password doesn't conform to policy")
else:
module.fail_json(msg=error_msg)
@@ -391,7 +395,7 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key
def set_users_groups(module, iam, name, groups, updated=None,
new_name=None):
- """ Sets groups for a user, will purge groups not explictly passed, while
+ """ Sets groups for a user, will purge groups not explicitly passed, while
retaining pre-existing groups that also are in the new list.
"""
changed = False
@@ -624,7 +628,7 @@ def main():
if iam_type == 'role' and state == 'update':
module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
- "please specificy present or absent")
+ "please specify present or absent")
# check if trust_policy is present -- it can be inline JSON or a file path to a JSON file
if trust_policy_filepath:
@@ -795,4 +799,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/iam_cert.py b/cloud/amazon/iam_cert.py
index 8e56017052e..6e71b5552f5 100644
--- a/cloud/amazon/iam_cert.py
+++ b/cloud/amazon/iam_cert.py
@@ -13,6 +13,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: iam_cert
@@ -232,7 +236,7 @@ def main():
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False),
- dup_ok=dict(default=False, required=False, choices=[False, True], type='bool')
+ dup_ok=dict(default=False, required=False, type='bool')
)
)
diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py
index ce0c06a8d51..97be3f40512 100644
--- a/cloud/amazon/iam_policy.py
+++ b/cloud/amazon/iam_policy.py
@@ -13,6 +13,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: iam_policy
@@ -94,7 +98,7 @@
policy_name: "READ-ONLY"
policy_document: readonlypolicy.json
state: present
- with_items: new_groups.results
+ with_items: "{{ new_groups.results }}"
# Create a new S3 policy with prefix per user
tasks:
@@ -139,7 +143,7 @@ def user_action(module, iam, name, policy_name, skip, pdoc, state):
current_policies = [cp for cp in iam.get_all_user_policies(name).
list_user_policies_result.
policy_names]
- pol = ""
+ matching_policies = []
for pol in current_policies:
'''
urllib is needed here because boto returns url encoded strings instead
@@ -147,13 +151,13 @@ def user_action(module, iam, name, policy_name, skip, pdoc, state):
if urllib.unquote(iam.get_user_policy(name, pol).
get_user_policy_result.policy_document) == pdoc:
policy_match = True
- break
+ matching_policies.append(pol)
if state == 'present':
# If policy document does not already exist (either it's changed
# or the policy is not present) or if we're not skipping dupes then
# make the put call. Note that the put call does a create or update.
- if (not policy_match or not skip) and pol != name:
+ if not policy_match or (not skip and policy_name not in matching_policies):
changed = True
iam.put_user_policy(name, policy_name, pdoc)
elif state == 'absent':
@@ -191,18 +195,18 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state):
module.fail_json(msg=e.message)
try:
- pol = ""
+ matching_policies = []
for pol in current_policies:
if urllib.unquote(iam.get_role_policy(name, pol).
get_role_policy_result.policy_document) == pdoc:
policy_match = True
- break
+ matching_policies.append(pol)
if state == 'present':
# If policy document does not already exist (either it's changed
# or the policy is not present) or if we're not skipping dupes then
# make the put call. Note that the put call does a create or update.
- if (not policy_match or not skip) and pol != name:
+ if not policy_match or (not skip and policy_name not in matching_policies):
changed = True
iam.put_role_policy(name, policy_name, pdoc)
elif state == 'absent':
@@ -236,20 +240,19 @@ def group_action(module, iam, name, policy_name, skip, pdoc, state):
current_policies = [cp for cp in iam.get_all_group_policies(name).
list_group_policies_result.
policy_names]
- pol = ""
+ matching_policies = []
for pol in current_policies:
if urllib.unquote(iam.get_group_policy(name, pol).
get_group_policy_result.policy_document) == pdoc:
policy_match = True
- if policy_match:
- msg=("The policy document you specified already exists "
- "under the name %s." % pol)
- break
+ matching_policies.append(pol)
+ msg=("The policy document you specified already exists "
+ "under the name %s." % pol)
if state == 'present':
# If policy document does not already exist (either it's changed
# or the policy is not present) or if we're not skipping dupes then
# make the put call. Note that the put call does a create or update.
- if (not policy_match or not skip) and pol != name:
+ if not policy_match or (not skip and policy_name not in matching_policies):
changed = True
iam.put_group_policy(name, policy_name, pdoc)
elif state == 'absent':
@@ -283,7 +286,7 @@ def main():
iam_name=dict(default=None, required=False),
policy_name=dict(default=None, required=True),
policy_document=dict(default=None, required=False),
- policy_json=dict(default=None, required=False),
+ policy_json=dict(type='json', default=None, required=False),
skip_duplicates=dict(type='bool', default=True, required=False)
))
@@ -350,4 +353,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py
index a54863b7d15..41438da1843 100644
--- a/cloud/amazon/rds.py
+++ b/cloud/amazon/rds.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rds
@@ -115,7 +119,7 @@
description:
- Port number that the DB instance uses for connections. Used only when command=create or command=replicate.
- Prior to 2.0 it always defaults to null and the API would use 3306, it had to be set to other DB default values when not using MySql.
- Starting at 2.0 it auotmaticaly defaults to what is expected for each c(db_engine).
+ Starting at 2.0 it automatically defaults to what is expected for each c(db_engine).
required: false
default: 3306 for mysql, 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL.
upgrade:
@@ -223,7 +227,9 @@
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Will Thames (@willthames)"
-
+extends_documentation_fragment:
+ - aws
+ - ec2
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
@@ -299,9 +305,9 @@
instance_name: MyNewInstanceName
region: us-west-2
vpc_security_groups: sg-xxx945xx
-
-- debug: msg="The new db endpoint is {{ rds.instance.endpoint }}"
+- debug:
+ msg: "The new db endpoint is {{ rds.instance.endpoint }}"
'''
import sys
@@ -539,8 +545,8 @@ def get_data(self):
'iops' : self.instance.iops
}
- # Endpoint exists only if the instance is available
- if self.status == 'available':
+ # Only assign an Endpoint if one is available
+ if hasattr(self.instance, 'endpoint'):
d["endpoint"] = self.instance.endpoint[0]
d["port"] = self.instance.endpoint[1]
if self.instance.vpc_security_groups is not None:
@@ -587,9 +593,9 @@ def get_data(self):
}
if self.instance["VpcSecurityGroups"] is not None:
d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
- if self.status == 'available':
- d['endpoint'] = self.instance["Endpoint"]["Address"]
- d['port'] = self.instance["Endpoint"]["Port"]
+ if "Endpoint" in self.instance and self.instance["Endpoint"] is not None:
+ d['endpoint'] = self.instance["Endpoint"].get('Address', None)
+ d['port'] = self.instance["Endpoint"].get('Port', None)
else:
d['endpoint'] = None
d['port'] = None
@@ -1102,4 +1108,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/rds_param_group.py b/cloud/amazon/rds_param_group.py
index 3ef82835aea..154fed391aa 100644
--- a/cloud/amazon/rds_param_group.py
+++ b/cloud/amazon/rds_param_group.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rds_param_group
@@ -161,7 +165,7 @@ def set_parameter(param, value, immediate):
# may be based on a variable (ie. {foo*3/4}) so
# just pass it on through to boto
converted_value = str(value)
- elif type(value) == bool:
+ elif isinstance(value, bool):
converted_value = 1 if value else 0
else:
converted_value = int(value)
@@ -234,7 +238,7 @@ def main():
immediate = module.params.get('immediate') or False
if state == 'present':
- for required in ['name', 'description', 'engine', 'params']:
+ for required in ['name', 'description', 'engine']:
if not module.params.get(required):
module.fail_json(msg = str("Parameter %s required for state='present'" % required))
else:
diff --git a/cloud/amazon/rds_subnet_group.py b/cloud/amazon/rds_subnet_group.py
index 7ec4ac01b7c..bec08cf61d6 100644
--- a/cloud/amazon/rds_subnet_group.py
+++ b/cloud/amazon/rds_subnet_group.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rds_subnet_group
@@ -151,4 +155,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py
index 107ca757d95..6452fefb359 100644
--- a/cloud/amazon/route53.py
+++ b/cloud/amazon/route53.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: route53
@@ -217,13 +221,13 @@
# Add an alias record that points to an Amazon ELB:
- route53:
- command=create
- zone=foo.com
- record=elb.foo.com
- type=A
- value="{{ elb_dns_name }}"
- alias=True
- alias_hosted_zone_id="{{ elb_zone_id }}"
+ command: create
+ zone: foo.com
+ record: elb.foo.com
+ type: A
+ value: "{{ elb_dns_name }}"
+ alias: True
+ alias_hosted_zone_id: "{{ elb_zone_id }}"
# Retrieve the details for elb.foo.com
- route53:
@@ -246,14 +250,14 @@
# Add an alias record that points to an Amazon ELB and evaluates it health:
- route53:
- command=create
- zone=foo.com
- record=elb.foo.com
- type=A
- value="{{ elb_dns_name }}"
- alias=True
- alias_hosted_zone_id="{{ elb_zone_id }}"
- alias_evaluate_target_health=True
+ command: create
+ zone: foo.com
+ record: elb.foo.com
+ type: A
+ value: "{{ elb_dns_name }}"
+ alias: True
+ alias_hosted_zone_id: "{{ elb_zone_id }}"
+ alias_evaluate_target_health: True
# Add an AAAA record with Hosted Zone ID. Note that because there are colons in the value
# that the entire parameter list must be quoted:
@@ -440,10 +444,10 @@ def main():
value_list = ()
- if type(value_in) is str:
+ if isinstance(value_in, str):
if value_in:
value_list = sorted([s.strip() for s in value_in.split(',')])
- elif type(value_in) is list:
+ elif isinstance(value_in, list):
value_list = sorted(value_in)
if zone_in[-1:] != '.':
@@ -586,4 +590,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py
index bb3d0145d42..9974a4f467e 100755
--- a/cloud/amazon/s3.py
+++ b/cloud/amazon/s3.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: s3
@@ -154,48 +158,101 @@
'''
EXAMPLES = '''
-# Simple PUT operation
-- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
-
-# Simple PUT operation in Ceph RGW S3
-- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put rgw=true s3_url=http://localhost:8000
-
-# Simple GET operation
-- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
-
-# Get a specific version of an object.
-- s3: bucket=mybucket object=/my/desired/key.txt version=48c9ee5131af7a716edc22df9772aa6f dest=/usr/local/myfile.txt mode=get
-
-# PUT/upload with metadata
-- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
-
-# PUT/upload with custom headers
-- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put headers=x-amz-grant-full-control=emailAddress=owner@example.com
-
-# List keys simple
-- s3: bucket=mybucket mode=list
-
-# List keys all options
-- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472
-
-# Create an empty bucket
-- s3: bucket=mybucket mode=create permission=public-read
-
-# Create a bucket with key as directory, in the EU region
-- s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1
-
-# Delete a bucket and all contents
-- s3: bucket=mybucket mode=delete
-
-# GET an object but dont download if the file checksums match. New in 2.0
-- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=different
-
-# Delete an object from a bucket
-- s3: bucket=mybucket object=/my/desired/key.txt mode=delobj
+- name: Simple PUT operation
+ s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+
+- name: Simple PUT operation in Ceph RGW S3
+ s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ rgw: true
+ s3_url: "http://localhost:8000"
+
+- name: Simple GET operation
+ s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: Get a specific version of an object.
+ s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ version: 48c9ee5131af7a716edc22df9772aa6f
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: PUT/upload with metadata
+ s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ metadata: 'Content-Encoding=gzip,Cache-Control=no-cache'
+
+- name: PUT/upload with custom headers
+ s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ headers: 'x-amz-grant-full-control=emailAddress=owner@example.com'
+
+- name: List keys simple
+ s3:
+ bucket: mybucket
+ mode: list
+
+- name: List keys all options
+ s3:
+ bucket: mybucket
+ mode: list
+ prefix: /my/desired/
+ marker: /my/desired/0023.txt
+ max_keys: 472
+
+- name: Create an empty bucket
+ s3:
+ bucket: mybucket
+ mode: create
+ permission: public-read
+
+- name: Create a bucket with key as directory, in the EU region
+ s3:
+ bucket: mybucket
+ object: /my/directory/path
+ mode: create
+ region: eu-west-1
+
+- name: Delete a bucket and all contents
+ s3:
+ bucket: mybucket
+ mode: delete
+
+- name: GET an object but don't download if the file checksums match. New in 2.0
+ s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+ overwrite: different
+
+- name: Delete an object from a bucket
+ s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ mode: delobj
'''
import os
-import urlparse
+from ansible.module_utils.six.moves.urllib.parse import urlparse
from ssl import SSLError
try:
@@ -683,4 +740,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py
index 89ab576ea14..60cdbbe0479 100644
--- a/cloud/azure/azure.py
+++ b/cloud/azure/azure.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure
@@ -485,9 +489,9 @@ def terminate_virtual_machine(module, azure):
except AzureException as e:
module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e)))
- # It's unclear when disks associated with terminated deployment get detatched.
+ # It's unclear when disks associated with terminated deployment get detached.
# Thus, until the wait_timeout is reached, we continue to delete disks as they
- # become detatched by polling the list of remaining disks and examining the state.
+ # become detached by polling the list of remaining disks and examining the state.
try:
_delete_disks_when_detached(azure, wait_timeout, disk_names)
except (AzureException, TimeoutError) as e:
@@ -535,7 +539,7 @@ def main():
management_cert_path=dict(),
endpoints=dict(default='22'),
user=dict(),
- password=dict(),
+ password=dict(no_log=True),
image=dict(),
virtual_network_name=dict(default=None),
state=dict(default='present'),
@@ -594,7 +598,7 @@ def __getattr__(self, name):
raise AttributeError(name)
def _wrap(self, func, args, kwargs):
- if type(func) == MethodType:
+ if isinstance(func, MethodType):
result = self._handle_temporary_redirects(lambda: func(*args, **kwargs))
else:
result = self._handle_temporary_redirects(lambda: func(self.other, *args, **kwargs))
diff --git a/cloud/azure/azure_rm_networkinterface.py b/cloud/azure/azure_rm_networkinterface.py
index 742306ddb9f..651da3caadd 100644
--- a/cloud/azure/azure_rm_networkinterface.py
+++ b/cloud/azure/azure_rm_networkinterface.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_networkinterface
diff --git a/cloud/azure/azure_rm_networkinterface_facts.py b/cloud/azure/azure_rm_networkinterface_facts.py
index 07e0e2bd0a1..b82a9bf690b 100644
--- a/cloud/azure/azure_rm_networkinterface_facts.py
+++ b/cloud/azure/azure_rm_networkinterface_facts.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_networkinterface_facts
diff --git a/cloud/azure/azure_rm_publicipaddress.py b/cloud/azure/azure_rm_publicipaddress.py
index fd5f07b9f30..7aa05d4ed5e 100644
--- a/cloud/azure/azure_rm_publicipaddress.py
+++ b/cloud/azure/azure_rm_publicipaddress.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_publicipaddress
diff --git a/cloud/azure/azure_rm_publicipaddress_facts.py b/cloud/azure/azure_rm_publicipaddress_facts.py
index a9fc27bab5c..d6b443756a3 100644
--- a/cloud/azure/azure_rm_publicipaddress_facts.py
+++ b/cloud/azure/azure_rm_publicipaddress_facts.py
@@ -20,6 +20,10 @@
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_publicip_facts
diff --git a/cloud/azure/azure_rm_resourcegroup.py b/cloud/azure/azure_rm_resourcegroup.py
index 66c5eb52b09..34a01afef05 100644
--- a/cloud/azure/azure_rm_resourcegroup.py
+++ b/cloud/azure/azure_rm_resourcegroup.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_resourcegroup
diff --git a/cloud/azure/azure_rm_resourcegroup_facts.py b/cloud/azure/azure_rm_resourcegroup_facts.py
index 01278794731..a6f7d1c7376 100644
--- a/cloud/azure/azure_rm_resourcegroup_facts.py
+++ b/cloud/azure/azure_rm_resourcegroup_facts.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_resouregroup_facts
diff --git a/cloud/azure/azure_rm_securitygroup.py b/cloud/azure/azure_rm_securitygroup.py
index db24bacd409..de2569d0941 100644
--- a/cloud/azure/azure_rm_securitygroup.py
+++ b/cloud/azure/azure_rm_securitygroup.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_securitygroup
diff --git a/cloud/azure/azure_rm_securitygroup_facts.py b/cloud/azure/azure_rm_securitygroup_facts.py
index a4ba06a3810..66c3cd20da6 100644
--- a/cloud/azure/azure_rm_securitygroup_facts.py
+++ b/cloud/azure/azure_rm_securitygroup_facts.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_securitygroup_facts
diff --git a/cloud/azure/azure_rm_storageaccount.py b/cloud/azure/azure_rm_storageaccount.py
index 99f92ece745..b71db78e96e 100644
--- a/cloud/azure/azure_rm_storageaccount.py
+++ b/cloud/azure/azure_rm_storageaccount.py
@@ -20,6 +20,10 @@
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_storageaccount
diff --git a/cloud/azure/azure_rm_storageaccount_facts.py b/cloud/azure/azure_rm_storageaccount_facts.py
index 386647ddf66..bbc18eb0ad5 100644
--- a/cloud/azure/azure_rm_storageaccount_facts.py
+++ b/cloud/azure/azure_rm_storageaccount_facts.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_storageaccount_facts
diff --git a/cloud/azure/azure_rm_storageblob.py b/cloud/azure/azure_rm_storageblob.py
index 3e5bd85ee6d..9ff235fd961 100644
--- a/cloud/azure/azure_rm_storageblob.py
+++ b/cloud/azure/azure_rm_storageblob.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_storageblob
@@ -185,7 +189,7 @@
"type": "BlockBlob"
}
container:
- description: Facts about the current state of the selcted container.
+ description: Facts about the current state of the selected container.
returned: always
type: dict
sample: {
diff --git a/cloud/azure/azure_rm_subnet.py b/cloud/azure/azure_rm_subnet.py
index d11c35f2e5b..5155715ac28 100644
--- a/cloud/azure/azure_rm_subnet.py
+++ b/cloud/azure/azure_rm_subnet.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_subnet
diff --git a/cloud/azure/azure_rm_virtualmachine.py b/cloud/azure/azure_rm_virtualmachine.py
index 868e4ed7a2c..40010df8eba 100644
--- a/cloud/azure/azure_rm_virtualmachine.py
+++ b/cloud/azure/azure_rm_virtualmachine.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_virtualmachine
@@ -158,7 +162,7 @@
required: false
public_ip_allocation_method:
description:
- - If a public IP address is created when creating the VM (beacuse a Network Interface was not provided),
+ - If a public IP address is created when creating the VM (because a Network Interface was not provided),
determines if the public IP address remains permanently associated with the Network Interface. If set
to 'Dynamic' the public IP address may change any time the VM is rebooted or power cycled.
choices:
@@ -300,7 +304,7 @@
type: list
example: ["testvm1001"]
deleted_public_ips:
- description: List of deleted publid IP addrees names.
+ description: List of deleted public IP address names.
returned: 'on delete'
type: list
example: ["testvm1001"]
@@ -573,7 +577,7 @@ def exec_module(self, **kwargs):
# Verify parameters and resolve any defaults
if self.vm_size and not self.vm_size_is_valid():
- self.fail("Parameter error: vm_size {0} is not valid for your subscription and location.".foramt(
+ self.fail("Parameter error: vm_size {0} is not valid for your subscription and location.".format(
self.vm_size
))
@@ -913,7 +917,7 @@ def serialize_vm(self, vm):
interface_dict['name'] = int_dict['networkInterfaces']
interface_dict['properties'] = nic_dict['properties']
- # Expand public IPs to include config porperties
+ # Expand public IPs to include config properties
for interface in result['properties']['networkProfile']['networkInterfaces']:
for config in interface['properties']['ipConfigurations']:
if config['properties'].get('publicIPAddress'):
diff --git a/cloud/azure/azure_rm_virtualmachineimage_facts.py b/cloud/azure/azure_rm_virtualmachineimage_facts.py
index 45aa8bbacdf..3efdc57db39 100644
--- a/cloud/azure/azure_rm_virtualmachineimage_facts.py
+++ b/cloud/azure/azure_rm_virtualmachineimage_facts.py
@@ -20,6 +20,10 @@
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_virtualmachineimage_facts
diff --git a/cloud/azure/azure_rm_virtualnetwork.py b/cloud/azure/azure_rm_virtualnetwork.py
index 417e999b635..97494444eea 100644
--- a/cloud/azure/azure_rm_virtualnetwork.py
+++ b/cloud/azure/azure_rm_virtualnetwork.py
@@ -20,6 +20,10 @@
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_virtualnetwork
diff --git a/cloud/azure/azure_rm_virtualnetwork_facts.py b/cloud/azure/azure_rm_virtualnetwork_facts.py
index 5f9f94c8097..229df95b74c 100644
--- a/cloud/azure/azure_rm_virtualnetwork_facts.py
+++ b/cloud/azure/azure_rm_virtualnetwork_facts.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: azure_rm_virtualnetwork_facts
diff --git a/cloud/digital_ocean/digital_ocean.py b/cloud/digital_ocean/digital_ocean.py
index 1a5e879ba6f..2cdcbd6420c 100644
--- a/cloud/digital_ocean/digital_ocean.py
+++ b/cloud/digital_ocean/digital_ocean.py
@@ -15,6 +15,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: digital_ocean
@@ -109,7 +113,7 @@
notes:
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token.
- As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token).
- - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired.
+ - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired.
Upgrade Ansible or, if unable to, try downloading the latest version of this module from github and putting it into a 'library' directory.
requirements:
- "python >= 2.6"
@@ -141,11 +145,13 @@
region_id: ams2
image_id: fedora-19-x64
wait_timeout: 500
-
register: my_droplet
-- debug: msg="ID is {{ my_droplet.droplet.id }}"
-- debug: msg="IP is {{ my_droplet.droplet.ip_address }}"
+- debug:
+ msg: "ID is {{ my_droplet.droplet.id }}"
+
+- debug:
+ msg: "IP is {{ my_droplet.droplet.ip_address }}"
# Ensure a droplet is present
# If droplet id already exist, will return the droplet details and changed = False
@@ -180,26 +186,39 @@
import os
import time
+import traceback
+
from distutils.version import LooseVersion
-HAS_DOPY = True
+try:
+ import six
+ HAS_SIX = True
+except ImportError:
+ HAS_SIX = False
+
+HAS_DOPY = False
try:
import dopy
from dopy.manager import DoError, DoManager
- if LooseVersion(dopy.__version__) < LooseVersion('0.3.2'):
- HAS_DOPY = False
+ if LooseVersion(dopy.__version__) >= LooseVersion('0.3.2'):
+ HAS_DOPY = True
except ImportError:
- HAS_DOPY = False
+ pass
-class TimeoutError(DoError):
- def __init__(self, msg, id):
+from ansible.module_utils.basic import AnsibleModule
+
+
+class TimeoutError(Exception):
+ def __init__(self, msg, id_):
super(TimeoutError, self).__init__(msg)
- self.id = id
+ self.id = id_
+
class JsonfyMixIn(object):
def to_json(self):
return self.__dict__
+
class Droplet(JsonfyMixIn):
manager = None
@@ -283,6 +302,7 @@ def list_all(cls):
json = cls.manager.all_active_droplets()
return map(cls, json)
+
class SSH(JsonfyMixIn):
manager = None
@@ -318,6 +338,7 @@ def add(cls, name, key_pub):
json = cls.manager.new_ssh_key(name, key_pub)
return cls(json)
+
def core(module):
def getkeyordie(k):
v = module.params[k]
@@ -385,7 +406,7 @@ def getkeyordie(k):
if not droplet:
module.exit_json(changed=False, msg='The droplet is not found.')
- event_json = droplet.destroy()
+ droplet.destroy()
module.exit_json(changed=True)
elif command == 'ssh':
@@ -440,6 +461,8 @@ def main():
['id', 'name'],
),
)
+ if not HAS_DOPY and not HAS_SIX:
+ module.fail_json(msg='dopy >= 0.3.2 is required for this module. dopy requires six but six is not installed. Make sure both dopy and six are installed.')
if not HAS_DOPY:
module.fail_json(msg='dopy >= 0.3.2 required for this module')
@@ -448,10 +471,7 @@ def main():
except TimeoutError as e:
module.fail_json(msg=str(e), id=e.id)
except (DoError, Exception) as e:
- module.fail_json(msg=str(e))
-
-# import module snippets
-from ansible.module_utils.basic import *
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
diff --git a/cloud/digital_ocean/digital_ocean_block_storage.py b/cloud/digital_ocean/digital_ocean_block_storage.py
new file mode 100644
index 00000000000..73a76cd737b
--- /dev/null
+++ b/cloud/digital_ocean/digital_ocean_block_storage.py
@@ -0,0 +1,348 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: digital_ocean_block_storage
+short_description: Create/destroy or attach/detach Block Storage volumes in DigitalOcean
+description:
+ - Create/destroy Block Storage volume in DigitalOcean, or attach/detach Block Storage volume to a droplet.
+version_added: "2.2"
+options:
+ command:
+ description:
+ - Which operation do you want to perform.
+ choices: ['create', 'attach']
+ required: true
+ state:
+ description:
+ - Indicate desired state of the target.
+ choices: ['present', 'absent']
+ required: true
+ api_token:
+ description:
+ - DigitalOcean api token.
+ required: true
+ block_size:
+ description:
+ - The size of the Block Storage volume in gigabytes. Required when command=create and state=present.
+ volume_name:
+ description:
+ - The name of the Block Storage volume.
+ required: true
+ description:
+ description:
+ - Description of the Block Storage volume.
+ region:
+ description:
+ - The slug of the region where your Block Storage volume should be located in.
+ required: true
+ droplet_id:
+ description:
+ - The droplet id you want to operate on. Required when command=attach.
+ timeout:
+ description:
+ - The timeout in seconds used for polling DigitalOcean's API.
+ default: 10
+
+notes:
+ - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN.
+ They both refer to the v2 token.
+
+author:
+ - "Harnek Sidhu (github: @harneksidhu)"
+'''
+
+EXAMPLES = '''
+# Create new Block Storage
+- digital_ocean_block_storage:
+ state: present
+ command: create
+ api_token: <your_api_token>
+ region: nyc1
+ block_size: 10
+ volume_name: nyc1-block-storage
+# Delete Block Storage
+- digital_ocean_block_storage:
+ state: absent
+ command: create
+ api_token: <your_api_token>
+ region: nyc1
+ volume_name: nyc1-block-storage
+# Attach Block Storage to a Droplet
+- digital_ocean_block_storage:
+ state: present
+ command: attach
+ api_token: <your_api_token>
+ volume_name: nyc1-block-storage
+ region: nyc1
+ droplet_id: <droplet_id>
+# Detach Block Storage from a Droplet
+- digital_ocean_block_storage:
+ state: absent
+ command: attach
+ api_token: <your_api_token>
+ volume_name: nyc1-block-storage
+ region: nyc1
+ droplet_id: <droplet_id>
+'''
+
+RETURN = '''
+id:
+ description: Unique identifier of a Block Storage volume returned during creation.
+ returned: changed
+ type: string
+ sample: "69b25d9a-494c-12e6-a5af-001f53126b44"
+'''
+
+import json
+import os
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import fetch_url
+
+
+class DOBlockStorageException(Exception):
+ pass
+
+
+class Response(object):
+
+ def __init__(self, resp, info):
+ self.body = None
+ if resp:
+ self.body = resp.read()
+ self.info = info
+
+ @property
+ def json(self):
+ if self.body:
+ return json.loads(self.body)
+ elif "body" in self.info:
+ return json.loads(self.info["body"])
+ else:
+ return None
+
+ @property
+ def status_code(self):
+ return self.info["status"]
+
+
+class Rest(object):
+
+ def __init__(self, module, headers):
+ self.module = module
+ self.headers = headers
+ self.baseurl = 'https://api.digitalocean.com/v2'
+
+ def _url_builder(self, path):
+ if path[0] == '/':
+ path = path[1:]
+ return '%s/%s' % (self.baseurl, path)
+
+ def send(self, method, path, data=None, headers=None):
+ url = self._url_builder(path)
+ data = self.module.jsonify(data)
+
+ resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)
+
+ return Response(resp, info)
+
+ def get(self, path, data=None, headers=None):
+ return self.send('GET', path, data, headers)
+
+ def put(self, path, data=None, headers=None):
+ return self.send('PUT', path, data, headers)
+
+ def post(self, path, data=None, headers=None):
+ return self.send('POST', path, data, headers)
+
+ def delete(self, path, data=None, headers=None):
+ return self.send('DELETE', path, data, headers)
+
+
+class DOBlockStorage(object):
+
+ def __init__(self, module):
+ api_token = module.params['api_token'] or \
+ os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY']
+ self.module = module
+ self.rest = Rest(module, {'Authorization': 'Bearer {}'.format(api_token),
+ 'Content-type': 'application/json'})
+
+ def get_key_or_fail(self, k):
+ v = self.module.params[k]
+ if v is None:
+ self.module.fail_json(msg='Unable to load %s' % k)
+ return v
+
+ def poll_action_for_complete_status(self, action_id):
+ url = 'actions/{}'.format(action_id)
+ end_time = time.time() + self.module.params['timeout']
+ while time.time() < end_time:
+ time.sleep(2)
+ response = self.rest.get(url)
+ status = response.status_code
+ json = response.json
+ if status == 200:
+ if json['action']['status'] == 'completed':
+ return True
+ elif json['action']['status'] == 'errored':
+ raise DOBlockStorageException(json['message'])
+ raise DOBlockStorageException('Unable to reach api.digitalocean.com')
+
+ def get_attached_droplet_ID(self, volume_name, region):
+ url = 'volumes?name={}&region={}'.format(volume_name, region)
+ response = self.rest.get(url)
+ status = response.status_code
+ json = response.json
+ if status == 200:
+ volumes = json['volumes']
+ if len(volumes) > 0:
+ droplet_ids = volumes[0]['droplet_ids']
+ if len(droplet_ids) > 0:
+ return droplet_ids[0]
+ return None
+ else:
+ raise DOBlockStorageException(json['message'])
+
+ def attach_detach_block_storage(self, method, volume_name, region, droplet_id):
+ data = {
+ 'type': method,
+ 'volume_name': volume_name,
+ 'region': region,
+ 'droplet_id': droplet_id
+ }
+ response = self.rest.post('volumes/actions', data=data)
+ status = response.status_code
+ json = response.json
+ if status == 202:
+ return self.poll_action_for_complete_status(json['action']['id'])
+ elif status == 200:
+ return True
+ elif status == 422:
+ return False
+ else:
+ raise DOBlockStorageException(json['message'])
+
+ def create_block_storage(self):
+ block_size = self.get_key_or_fail('block_size')
+ volume_name = self.get_key_or_fail('volume_name')
+ region = self.get_key_or_fail('region')
+ description = self.module.params['description']
+ data = {
+ 'size_gigabytes': block_size,
+ 'name': volume_name,
+ 'description': description,
+ 'region': region
+ }
+ response = self.rest.post("volumes", data=data)
+ status = response.status_code
+ json = response.json
+ if status == 201:
+ self.module.exit_json(changed=True, id=json['volume']['id'])
+ elif status == 409 and json['id'] == 'already_exists':
+ self.module.exit_json(changed=False)
+ else:
+ raise DOBlockStorageException(json['message'])
+
+ def delete_block_storage(self):
+ volume_name = self.get_key_or_fail('volume_name')
+ region = self.get_key_or_fail('region')
+ url = 'volumes?name={}&region={}'.format(volume_name, region)
+ attached_droplet_id = self.get_attached_droplet_ID(volume_name, region)
+ if attached_droplet_id is not None:
+ self.attach_detach_block_storage('detach', volume_name, region, attached_droplet_id)
+ response = self.rest.delete(url)
+ status = response.status_code
+ json = response.json
+ if status == 204:
+ self.module.exit_json(changed=True)
+ elif status == 404:
+ self.module.exit_json(changed=False)
+ else:
+ raise DOBlockStorageException(json['message'])
+
+ def attach_block_storage(self):
+ volume_name = self.get_key_or_fail('volume_name')
+ region = self.get_key_or_fail('region')
+ droplet_id = self.get_key_or_fail('droplet_id')
+ attached_droplet_id = self.get_attached_droplet_ID(volume_name, region)
+ if attached_droplet_id is not None:
+ if attached_droplet_id == droplet_id:
+ self.module.exit_json(changed=False)
+ else:
+ self.attach_detach_block_storage('detach', volume_name, region, attached_droplet_id)
+ changed_status = self.attach_detach_block_storage('attach', volume_name, region, droplet_id)
+ self.module.exit_json(changed=changed_status)
+
+ def detach_block_storage(self):
+ volume_name = self.get_key_or_fail('volume_name')
+ region = self.get_key_or_fail('region')
+ droplet_id = self.get_key_or_fail('droplet_id')
+ changed_status = self.attach_detach_block_storage('detach', volume_name, region, droplet_id)
+ self.module.exit_json(changed=changed_status)
+
+
+def handle_request(module):
+ block_storage = DOBlockStorage(module)
+ command = module.params['command']
+ state = module.params['state']
+ if command == 'create':
+ if state == 'present':
+ block_storage.create_block_storage()
+ elif state == 'absent':
+ block_storage.delete_block_storage()
+ elif command == 'attach':
+ if state == 'present':
+ block_storage.attach_block_storage()
+ elif state == 'absent':
+ block_storage.detach_block_storage()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state = dict(choices=['present', 'absent'], required=True),
+ command = dict(choices=['create', 'attach'], required=True),
+ api_token = dict(aliases=['API_TOKEN'], no_log=True),
+ block_size = dict(type='int'),
+ volume_name = dict(type='str', required=True),
+ description = dict(type='str'),
+ region = dict(type='str', required=True),
+ droplet_id = dict(type='int'),
+ timeout = dict(type='int', default=10),
+ ),
+ )
+ try:
+ handle_request(module)
+ except DOBlockStorageException:
+ e = get_exception()
+ module.fail_json(msg=e.message)
+ except KeyError:
+ e = get_exception()
+ module.fail_json(msg='Unable to load %s' % e.message)
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/digital_ocean/digital_ocean_domain.py b/cloud/digital_ocean/digital_ocean_domain.py
index 565fec030f0..61fc6c8eeac 100644
--- a/cloud/digital_ocean/digital_ocean_domain.py
+++ b/cloud/digital_ocean/digital_ocean_domain.py
@@ -15,6 +15,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: digital_ocean_domain
@@ -46,7 +50,7 @@
notes:
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token.
- As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token).
- - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired.
+ - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired.
requirements:
- "python >= 2.6"
@@ -82,7 +86,7 @@
'''
import os
-import time
+import traceback
try:
from dopy.manager import DoError, DoManager
@@ -90,15 +94,14 @@
except ImportError as e:
HAS_DOPY = False
-class TimeoutError(DoError):
- def __init__(self, msg, id):
- super(TimeoutError, self).__init__(msg)
- self.id = id
+from ansible.module_utils.basic import AnsibleModule
+
class JsonfyMixIn(object):
def to_json(self):
return self.__dict__
+
class DomainRecord(JsonfyMixIn):
manager = None
@@ -106,7 +109,7 @@ def __init__(self, json):
self.__dict__.update(json)
update_attr = __init__
- def update(self, data = None, record_type = None):
+ def update(self, data=None, record_type=None):
json = self.manager.edit_domain_record(self.domain_id,
self.id,
record_type if record_type is not None else self.record_type,
@@ -118,6 +121,7 @@ def destroy(self):
json = self.manager.destroy_domain_record(self.domain_id, self.id)
return json
+
class Domain(JsonfyMixIn):
manager = None
@@ -165,6 +169,7 @@ def find(cls, name=None, id=None):
return False
+
def core(module):
def getkeyordie(k):
v = module.params[k]
@@ -236,12 +241,8 @@ def main():
try:
core(module)
- except TimeoutError as e:
- module.fail_json(msg=str(e), id=e.id)
except (DoError, Exception) as e:
- module.fail_json(msg=str(e))
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
-# import module snippets
-from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
diff --git a/cloud/digital_ocean/digital_ocean_sshkey.py b/cloud/digital_ocean/digital_ocean_sshkey.py
index 25f3b1ef7d0..00c31c788e9 100644
--- a/cloud/digital_ocean/digital_ocean_sshkey.py
+++ b/cloud/digital_ocean/digital_ocean_sshkey.py
@@ -15,6 +15,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: digital_ocean_sshkey
@@ -69,7 +73,7 @@
'''
import os
-import time
+import traceback
try:
from dopy.manager import DoError, DoManager
@@ -77,15 +81,14 @@
except ImportError:
HAS_DOPY = False
-class TimeoutError(DoError):
- def __init__(self, msg, id):
- super(TimeoutError, self).__init__(msg)
- self.id = id
+from ansible.module_utils.basic import AnsibleModule
+
class JsonfyMixIn(object):
def to_json(self):
return self.__dict__
+
class SSH(JsonfyMixIn):
manager = None
@@ -121,6 +124,7 @@ def add(cls, name, key_pub):
json = cls.manager.new_ssh_key(name, key_pub)
return cls(json)
+
def core(module):
def getkeyordie(k):
v = module.params[k]
@@ -135,7 +139,6 @@ def getkeyordie(k):
except KeyError as e:
module.fail_json(msg='Unable to load %s' % e.message)
- changed = True
state = module.params['state']
SSH.setup(client_id, api_key)
@@ -154,6 +157,7 @@ def getkeyordie(k):
key.destroy()
module.exit_json(changed=True)
+
def main():
module = AnsibleModule(
argument_spec = dict(
@@ -173,12 +177,8 @@ def main():
try:
core(module)
- except TimeoutError as e:
- module.fail_json(msg=str(e), id=e.id)
except (DoError, Exception) as e:
- module.fail_json(msg=str(e))
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
-# import module snippets
-from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
diff --git a/cloud/digital_ocean/digital_ocean_tag.py b/cloud/digital_ocean/digital_ocean_tag.py
new file mode 100644
index 00000000000..3ad387a90c3
--- /dev/null
+++ b/cloud/digital_ocean/digital_ocean_tag.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: digital_ocean_tag
+short_description: Create and remove tag(s) on a DigitalOcean resource.
+description:
+ - Create and remove tag(s) on a DigitalOcean resource.
+version_added: "2.2"
+options:
+ name:
+ description:
+ - The name of the tag. The supported characters for names include
+ alphanumeric characters, dashes, and underscores.
+ required: true
+ resource_id:
+ description:
+ - The ID of the resource to operate on.
+ resource_type:
+ description:
+ - The type of resource to operate on. Currently only tagging of
+ droplets is supported.
+ default: droplet
+ choices: ['droplet']
+ state:
+ description:
+ - Whether the tag should be present or absent on the resource.
+ default: present
+ choices: ['present', 'absent']
+ api_token:
+ description:
+ - DigitalOcean api token.
+
+notes:
+ - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN.
+ They both refer to the v2 token.
+ - As of Ansible 2.0, Version 2 of the DigitalOcean API is used.
+
+requirements:
+ - "python >= 2.6"
+'''
+
+
+EXAMPLES = '''
+- name: create a tag
+ digital_ocean_tag:
+ name: production
+ state: present
+
+- name: tag a resource; creating the tag if it does not exist
+ digital_ocean_tag:
+ name: "{{ item }}"
+ resource_id: YYY
+ state: present
+ with_items:
+ - staging
+ - dbserver
+
+- name: untag a resource
+ digital_ocean_tag:
+ name: staging
+ resource_id: YYY
+ state: absent
+
+# Deleting a tag also untags all the resources that have previously been
+# tagged with it
+- name: remove a tag
+ digital_ocean_tag:
+ name: dbserver
+ state: absent
+'''
+
+
+RETURN = '''
+data:
+ description: a DigitalOcean Tag resource
+ returned: success and no resource constraint
+ type: dict
+ sample: {
+ "tag": {
+ "name": "awesome",
+ "resources": {
+ "droplets": {
+ "count": 0,
+ "last_tagged": null
+ }
+ }
+ }
+ }
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+class Response(object):
+
+ def __init__(self, resp, info):
+ self.body = None
+ if resp:
+ self.body = resp.read()
+ self.info = info
+
+ @property
+ def json(self):
+ if not self.body:
+ if "body" in self.info:
+ return json.loads(self.info["body"])
+ return None
+ try:
+ return json.loads(self.body)
+ except ValueError:
+ return None
+
+ @property
+ def status_code(self):
+ return self.info["status"]
+
+
+class Rest(object):
+
+ def __init__(self, module, headers):
+ self.module = module
+ self.headers = headers
+ self.baseurl = 'https://api.digitalocean.com/v2'
+
+ def _url_builder(self, path):
+ if path[0] == '/':
+ path = path[1:]
+ return '%s/%s' % (self.baseurl, path)
+
+ def send(self, method, path, data=None, headers=None):
+ url = self._url_builder(path)
+ data = self.module.jsonify(data)
+
+ resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)
+
+ return Response(resp, info)
+
+ def get(self, path, data=None, headers=None):
+ return self.send('GET', path, data, headers)
+
+ def put(self, path, data=None, headers=None):
+ return self.send('PUT', path, data, headers)
+
+ def post(self, path, data=None, headers=None):
+ return self.send('POST', path, data, headers)
+
+ def delete(self, path, data=None, headers=None):
+ return self.send('DELETE', path, data, headers)
+
+
+def core(module):
+ try:
+ api_token = module.params['api_token'] or \
+ os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY']
+ except KeyError as e:
+ module.fail_json(msg='Unable to load %s' % e.message)
+
+ state = module.params['state']
+ name = module.params['name']
+ resource_id = module.params['resource_id']
+ resource_type = module.params['resource_type']
+
+ rest = Rest(module, {'Authorization': 'Bearer {}'.format(api_token),
+ 'Content-type': 'application/json'})
+
+ if state in ('present'):
+ if name is None:
+ module.fail_json(msg='parameter `name` is missing')
+
+ # Ensure Tag exists
+ response = rest.post("tags", data={'name': name})
+ status_code = response.status_code
+ json = response.json
+ if status_code == 201:
+ changed = True
+ elif status_code == 422:
+ changed = False
+ else:
+ module.exit_json(changed=False, data=json)
+
+ if resource_id is None:
+ # No resource defined, we're done.
+ if json is None:
+ module.exit_json(changed=changed, data=json)
+ else:
+ module.exit_json(changed=changed, data=json)
+ else:
+ # Tag a resource
+ url = "tags/{}/resources".format(name)
+ payload = {
+ 'resources': [{
+ 'resource_id': resource_id,
+ 'resource_type': resource_type}]}
+ response = rest.post(url, data=payload)
+ if response.status_code == 204:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="error tagging resource '{}': {}".format(
+ resource_id, response.json["message"]))
+
+ elif state in ('absent'):
+ if name is None:
+ module.fail_json(msg='parameter `name` is missing')
+
+ if resource_id:
+ url = "tags/{}/resources".format(name)
+ payload = {
+ 'resources': [{
+ 'resource_id': resource_id,
+ 'resource_type': resource_type}]}
+ response = rest.delete(url, data=payload)
+ else:
+ url = "tags/{}".format(name)
+ response = rest.delete(url)
+ if response.status_code == 204:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False, data=response.json)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ resource_id=dict(aliases=['droplet_id'], type='int'),
+ resource_type=dict(choices=['droplet'], default='droplet'),
+ state=dict(choices=['present', 'absent'], default='present'),
+ api_token=dict(aliases=['API_TOKEN'], no_log=True),
+ )
+ )
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/docker/_docker.py b/cloud/docker/_docker.py
index bab50fcbb86..08adf3b9071 100644
--- a/cloud/docker/_docker.py
+++ b/cloud/docker/_docker.py
@@ -21,6 +21,10 @@
######################################################################
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: docker
@@ -1303,7 +1307,7 @@ def get_differing_containers(self):
for name, value in self.module.params.get('labels').iteritems():
expected_labels[name] = str(value)
- if type(container['Config']['Labels']) is dict:
+ if isinstance(container['Config']['Labels'], dict):
actual_labels = container['Config']['Labels']
else:
for container_label in container['Config']['Labels'] or []:
diff --git a/cloud/docker/docker_container.py b/cloud/docker/docker_container.py
index d1cdd952044..f921ce47177 100644
--- a/cloud/docker/docker_container.py
+++ b/cloud/docker/docker_container.py
@@ -17,6 +17,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: docker_container
@@ -124,7 +128,8 @@
required: false
exposed_ports:
description:
- - List of additional container ports to expose for port mappings or links.
+ - List of additional container ports which informs Docker that the container
+ listens on the specified network ports at runtime.
If the port is already exposed using EXPOSE in a Dockerfile, it does not
need to be exposed again.
default: null
@@ -200,7 +205,7 @@
required: false
log_driver:
description:
- - Specify the logging driver.
+ - Specify the logging driver. Docker uses json-file by default.
choices:
- json-file
- syslog
@@ -209,7 +214,7 @@
- fluentd
- awslogs
- splunk
- default: json-file
+ default: null
required: false
log_options:
description:
@@ -276,6 +281,12 @@
- Whether or not to disable OOM Killer for the container.
default: false
required: false
+ oom_score_adj:
+ description:
+ - An integer value containing the score given to the container in order to tune OOM killer preferences.
+ default: 0
+ required: false
+ version_added: "2.2"
paused:
description:
- Use with the started state to pause running processes inside the container.
@@ -283,7 +294,7 @@
required: false
pid_mode:
description:
- - Set the PID namespace mode for the container. Currenly only supports 'host'.
+ - Set the PID namespace mode for the container. Currently only supports 'host'.
default: null
required: false
privileged:
@@ -355,7 +366,7 @@
description:
- Size of `/dev/shm`. The format is ``. `number` must be greater than `0`.
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes).
- - Ommitting the unit defaults to bytes. If you omit the size entirely, the system uses `64m`.
+ - Omitting the unit defaults to bytes. If you omit the size entirely, the system uses `64m`.
default: null
required: false
security_opts:
@@ -405,7 +416,7 @@
description:
- If true, skip image verification.
default: false
- requried: false
+ required: false
tty:
description:
- Allocate a psuedo-TTY.
@@ -508,7 +519,7 @@
image: ubuntu:14.04
command: sleep infinity
-- name: Stop a contianer
+- name: Stop a container
docker_container:
name: mycontainer
state: stopped
@@ -564,7 +575,6 @@
- name: Add container to networks
docker_container:
- docker_container:
name: sleepy
networks:
- name: TestingNet
@@ -713,6 +723,7 @@ def __init__(self, client):
self.network_mode = None
self.networks = None
self.oom_killer = None
+ self.oom_score_adj = None
self.paused = None
self.pid_mode = None
self.privileged = None
@@ -909,6 +920,7 @@ def _host_config(self):
mem_limit='memory',
memswap_limit='memory_swap',
mem_swappiness='memory_swappiness',
+ oom_score_adj='oom_score_adj',
shm_size='shm_size',
group_add='groups',
devices='devices',
@@ -1173,6 +1185,7 @@ def has_different_configuration(self, image):
self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
self.parameters.expected_env = self._get_expected_env(image)
self.parameters.expected_cmd = self._get_expected_cmd()
+ self.parameters.expected_devices = self._get_expected_devices()
if not self.container.get('HostConfig'):
self.fail("has_config_diff: Error parsing container properties. HostConfig missing.")
@@ -1191,6 +1204,12 @@ def has_different_configuration(self, image):
# assuming if the container was running, it must have been detached.
detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
+ # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
+ if config.get('ExposedPorts') is not None:
+ expected_exposed = [re.sub(r'/.+$', '', p) for p in config.get('ExposedPorts', dict()).keys()]
+ else:
+ expected_exposed = []
+
# Map parameters to container inspect results
config_mapping = dict(
image=config.get('Image'),
@@ -1200,14 +1219,14 @@ def has_different_configuration(self, image):
detach=detach,
interactive=config.get('OpenStdin'),
capabilities=host_config.get('CapAdd'),
- devices=host_config.get('Devices'),
+ expected_devices=host_config.get('Devices'),
dns_servers=host_config.get('Dns'),
dns_opts=host_config.get('DnsOptions'),
dns_search_domains=host_config.get('DnsSearch'),
expected_env=(config.get('Env') or []),
expected_entrypoint=config.get('Entrypoint'),
expected_etc_hosts=host_config['ExtraHosts'],
- expected_exposed=[re.sub(r'/.+$', '', p) for p in config.get('ExposedPorts', dict()).keys()],
+ expected_exposed=expected_exposed,
groups=host_config.get('GroupAdd'),
ipc_mode=host_config.get("IpcMode"),
labels=config.get('Labels'),
@@ -1218,6 +1237,7 @@ def has_different_configuration(self, image):
memory_swappiness=host_config.get('MemorySwappiness'),
network_mode=host_config.get('NetworkMode'),
oom_killer=host_config.get('OomKillDisable'),
+ oom_score_adj=host_config.get('OomScoreAdj'),
pid_mode=host_config.get('PidMode'),
privileged=host_config.get('Privileged'),
expected_ports=host_config.get('PortBindings'),
@@ -1329,6 +1349,7 @@ def has_different_resource_limits(self):
memory=host_config.get('Memory'),
memory_reservation=host_config.get('MemoryReservation'),
memory_swap=host_config.get('MemorySwap'),
+ oom_score_adj=host_config.get('OomScoreAdj'),
)
differences = []
@@ -1423,6 +1444,37 @@ def has_extra_networks(self):
extra_networks.append(dict(name=network, id=network_config['NetworkID']))
return extra, extra_networks
+ def _get_expected_devices(self):
+ if not self.parameters.devices:
+ return None
+ expected_devices = []
+ for device in self.parameters.devices:
+ parts = device.split(':')
+ if len(parts) == 1:
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[0],
+ PathOnHost=parts[0]
+ ))
+ elif len(parts) == 2:
+ parts = device.split(':')
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ )
+ )
+ else:
+ expected_devices.append(
+ dict(
+ CgroupPermissions=parts[2],
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ ))
+ return expected_devices
+
def _get_expected_entrypoint(self):
self.log('_get_expected_entrypoint')
if not self.parameters.entrypoint:
@@ -1757,7 +1809,7 @@ def _add_networks(self, container, differences):
self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
if not self.check_mode:
try:
- self.log("Connecting conainer to network %s" % diff['parameter']['id'])
+ self.log("Connecting container to network %s" % diff['parameter']['id'])
self.log(params, pretty_print=True)
self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
except Exception as exc:
@@ -1767,9 +1819,9 @@ def _add_networks(self, container, differences):
def _purge_networks(self, container, networks):
for network in networks:
self.results['actions'].append(dict(removed_from_network=network['name']))
- if not self.check_mode and network.get('id'):
+ if not self.check_mode:
try:
- self.client.disconnect_container_from_network(container.Id, network['id'])
+ self.client.disconnect_container_from_network(container.Id, network['name'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (network['name'],
str(exc)))
@@ -1906,7 +1958,7 @@ def main():
kill_signal=dict(type='str'),
labels=dict(type='dict'),
links=dict(type='list'),
- log_driver=dict(type='str', choices=['json-file', 'syslog', 'journald', 'gelf', 'fluentd', 'awslogs', 'splunk'], default='json-file'),
+ log_driver=dict(type='str', choices=['json-file', 'syslog', 'journald', 'gelf', 'fluentd', 'awslogs', 'splunk'], default=None),
log_options=dict(type='dict', aliases=['log_opt']),
mac_address=dict(type='str'),
memory=dict(type='str', default='0'),
@@ -1917,17 +1969,18 @@ def main():
network_mode=dict(type='str'),
networks=dict(type='list'),
oom_killer=dict(type='bool'),
+ oom_score_adj=dict(type='int'),
paused=dict(type='bool', default=False),
pid_mode=dict(type='str'),
privileged=dict(type='bool', default=False),
published_ports=dict(type='list', aliases=['ports']),
pull=dict(type='bool', default=False),
- purge_networks=dict(type='bool', deault=False),
+ purge_networks=dict(type='bool', default=False),
read_only=dict(type='bool', default=False),
recreate=dict(type='bool', default=False),
restart=dict(type='bool', default=False),
restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
- restart_retries=dict(type='int', default=0),
+ restart_retries=dict(type='int', default=None),
shm_size=dict(type='str'),
security_opts=dict(type='list'),
state=dict(type='str', choices=['absent', 'present', 'started', 'stopped'], default='started'),
diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py
index 0a9ab09096f..0de16632e03 100644
--- a/cloud/docker/docker_image.py
+++ b/cloud/docker/docker_image.py
@@ -17,6 +17,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: docker_image
@@ -206,7 +210,7 @@
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
-- name: Build an image ad push it to a private repo
+- name: Build an image and push it to a private repo
docker_image:
path: ./sinatra
name: registry.ansible.com/chouseknecht/sinatra
@@ -224,7 +228,6 @@
tag: v1
push: yes
load_path: my_sinatra.tar
- push: True
- name: Build image and with buildargs
docker_image:
@@ -424,8 +427,7 @@ def push_image(self, name, tag=None):
if not self.check_mode:
status = None
try:
- for line in self.client.push(repository, tag=tag, stream=True):
- line = json.loads(line)
+ for line in self.client.push(repository, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('errorDetail'):
raise Exception(line['errorDetail']['message'])
diff --git a/cloud/docker/docker_image_facts.py b/cloud/docker/docker_image_facts.py
index 600491ec52e..81e16fe5e2a 100644
--- a/cloud/docker/docker_image_facts.py
+++ b/cloud/docker/docker_image_facts.py
@@ -17,6 +17,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: docker_image_facts
diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py
index 0af5b4fd947..f0b3c87ec39 100644
--- a/cloud/docker/docker_login.py
+++ b/cloud/docker/docker_login.py
@@ -21,23 +21,24 @@
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: docker_login
-
short_description: Log into a Docker registry.
-
version_added: "2.0"
-
description:
- Provides functionality similar to the "docker login" command.
- Authenticate with a docker registry and add the credentials to your local Docker config file. Adding the
credentials to the config files allows future connections to the registry using tools such as Ansible's Docker
modules, the Docker CLI and docker-py without needing to provide credentials.
- Running in check mode will perform the authentication without updating the config file.
-
options:
registry_url:
+ required: False
description:
- The registry URL.
default: "https://index.docker.io/v1/"
@@ -47,40 +48,51 @@
username:
description:
- The username for the registry account
- required: true
- default: null
+ required: True
password:
description:
- The plaintext password for the registry account
- required: true
- default: null
+ required: True
email:
+ required: False
description:
- "The email address for the registry account. NOTE: private registries may not require this,
but Docker Hub requires it."
default: None
reauthorize:
+ required: False
description:
- Refresh exiting authentication found in the configuration file.
- default: false
+ default: no
+ choices: ['yes', 'no']
aliases:
- reauth
config_path:
description:
- Custom path to the Docker CLI configuration file.
default: ~/.docker/config.json
+ required: False
aliases:
- self.config_path
- dockercfg_path
+ state:
+ version_added: '2.3'
+ description:
+ - This controls the current state of the user. C(present) will log in a user, C(absent) will log them out.
+ - To logout you only need the registry server, which defaults to DockerHub.
+ - Before 2.1 you could ONLY log in.
+ - docker does not support 'logout' with a custom config file.
+ choices: ['present', 'absent']
+ default: 'present'
+ required: False
extends_documentation_fragment:
- docker
-
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Docker API >= 1.20"
-
+ - 'Only to be able to logout (state=absent): the docker command line utility'
authors:
- "Olaf Kilian "
- "Chris Houseknecht (@chouseknecht)"
@@ -109,20 +121,16 @@
email: docker@docker.io
config_path: /tmp/.mydockercfg
+- name: Log out of DockerHub
+ docker_login:
+ state: absent
+ email: docker@docker.com
'''
RETURN = '''
-actions:
- description: List of actions taken by the module.
- returned: always
- type: list
- sample: [
- "Log into https://index.docker.io/v1/",
- "Updated config file /Users/chouseknecht/.docker/config.json with new authorization for https://index.docker.io/v1/"
- ]
login_results:
description: Results from the login.
- returned: always
+ returned: when state='present'
type: dict
sample: {
"email": "testuer@yahoo.com",
@@ -155,7 +163,10 @@ def __init__(self, client, results):
self.reauthorize = parameters.get('reauthorize')
self.config_path = parameters.get('config_path')
- self.login()
+ if parameters['state'] == 'present':
+ self.login()
+ else:
+ self.logout()
def fail(self, msg):
self.client.fail(msg)
@@ -190,6 +201,24 @@ def login(self):
if not self.check_mode:
self.update_config_file()
+ def logout(self):
+ '''
+ Log out of the registry. On success update the config file.
+ TODO: port to API once docker.py supports this.
+
+ :return: None
+ '''
+
+ cmd = "%s logout " % self.client.module.get_bin_path('docker', True)
+ #TODO: docker does not support config file in logout, restore this when they do
+ #if self.config_path and self.config_file_exists(self.config_path):
+ # cmd += "--config '%s' " % self.config_path
+ cmd += "'%s'" % self.registry_url
+
+ (rc, out, err) = self.client.module.run_command(cmd)
+ if rc != 0:
+ self.fail("Could not log out: %s" % err)
+
def config_file_exists(self, path):
if os.path.exists(path):
self.log("Configuration file %s exists" % (path))
@@ -223,7 +252,7 @@ def update_config_file(self):
'''
If the authorization not stored in the config file or reauthorize is True,
update the config file with the new authorization.
-
+
:return: None
'''
@@ -265,16 +294,16 @@ def main():
argument_spec=dict(
registry_url=dict(type='str', required=False, default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
- username=dict(type='str', required=True),
- password=dict(type='str', required=True, no_log=True),
+ username=dict(type='str', required=False),
+ password=dict(type='str', required=False, no_log=True),
email=dict(type='str'),
reauthorize=dict(type='bool', default=False, aliases=['reauth']),
- config_path=dict(type='str', default='~/.docker/config.json', aliases=['self.config_path',
- 'dockercfg_path']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ config_path=dict(type='str', default='~/.docker/config.json', aliases=['self.config_path', 'dockercfg_path']),
)
required_if = [
- ('registry_url', DEFAULT_DOCKER_REGISTRY, ['email'])
+ ('state', 'present', ['username', 'password']),
]
client = AnsibleDockerClient(
@@ -289,7 +318,12 @@ def main():
login_result={}
)
+ if module.params['state'] == 'present' and module.params['registry_url'] == DEFAULT_DOCKER_REGISTRY and not module.params['email']:
+ module.fail_json(msg="'email' is required when loging into DockerHub")
+
LoginManager(client, results)
+ if 'actions' in results:
+ del results['actions']
client.module.exit_json(**results)
# import module snippets
diff --git a/cloud/docker/docker_network.py b/cloud/docker/docker_network.py
index f06f7d5b09e..24ce4dc6a40 100644
--- a/cloud/docker/docker_network.py
+++ b/cloud/docker/docker_network.py
@@ -17,6 +17,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
module: docker_network
version_added: "2.2"
@@ -69,7 +73,7 @@
ipam_driver:
description:
- - Specifiy an IPAM driver.
+ - Specify an IPAM driver.
default: null
ipam_options:
diff --git a/cloud/docker/docker_service.py b/cloud/docker/docker_service.py
index afb2e757b68..ee7c35329f4 100644
--- a/cloud/docker/docker_service.py
+++ b/cloud/docker/docker_service.py
@@ -17,6 +17,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
module: docker_service
@@ -190,14 +194,16 @@
project_src: flask
register: output
- - debug: var=output
+ - debug:
+ var: output
- docker_service:
project_src: flask
build: no
register: output
- - debug: var=output
+ - debug:
+ var: output
- assert:
that: "not output.changed "
@@ -208,7 +214,8 @@
stopped: true
register: output
- - debug: var=output
+ - debug:
+ var: output
- assert:
that:
@@ -221,7 +228,8 @@
restarted: true
register: output
- - debug: var=output
+ - debug:
+ var: output
- assert:
that:
@@ -239,7 +247,8 @@
web: 2
register: output
- - debug: var=output
+ - debug:
+ var: output
- name: Run with inline v2 compose
hosts: localhost
@@ -268,7 +277,8 @@
- db
register: output
- - debug: var=output
+ - debug:
+ var: output
- assert:
that:
@@ -300,7 +310,8 @@
- db
register: output
- - debug: var=output
+ - debug:
+ var: output
- assert:
that:
@@ -423,7 +434,7 @@
returned: always
type: string
id:
- desription: image hash
+ description: image hash
returned: always
type: string
@@ -452,6 +463,9 @@
HAS_COMPOSE_EXC = None
MINIMUM_COMPOSE_VERSION = '1.7.0'
+import sys
+import re
+
try:
import yaml
except ImportError as exc:
@@ -463,14 +477,18 @@
try:
from compose import __version__ as compose_version
+ from compose.project import ProjectError
from compose.cli.command import project_from_options
- from compose.service import ConvergenceStrategy
+ from compose.service import ConvergenceStrategy, NoSuchImageError
from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt
+ from compose.const import DEFAULT_TIMEOUT
except ImportError as exc:
HAS_COMPOSE = False
HAS_COMPOSE_EXC = str(exc)
+ DEFAULT_TIMEOUT = 10
from ansible.module_utils.docker_common import *
+from contextlib import contextmanager
AUTH_PARAM_MAPPING = {
@@ -483,6 +501,31 @@
}
+@contextmanager
+def stdout_redirector(path_name):
+ old_stdout = sys.stdout
+ fd = open(path_name, 'w')
+ sys.stdout = fd
+ try:
+ yield
+ finally:
+ sys.stdout = old_stdout
+
+def get_stdout(path_name):
+ full_stdout = ''
+ last_line = ''
+ with open(path_name, 'r') as fd:
+ for line in fd:
+ # strip terminal format/color chars
+ new_line = re.sub(r'\x1b\[.+m', '', line.encode('ascii'))
+ full_stdout += new_line
+ if new_line.strip():
+ # Assuming last line contains the error message
+ last_line = new_line.strip().encode('utf-8')
+ fd.close()
+ os.remove(path_name)
+ return full_stdout, last_line
+
class ContainerManager(DockerBaseClass):
def __init__(self, client):
@@ -596,11 +639,11 @@ def _get_auth_options(self):
return options
def cmd_up(self):
-
+
start_deps = self.dependencies
service_names = self.services
detached = True
- result = dict(changed=False, actions=dict(), ansible_facts=dict())
+ result = dict(changed=False, actions=[], ansible_facts=dict())
up_options = {
u'--no-recreate': False,
@@ -622,49 +665,65 @@ def cmd_up(self):
self.log("convergence strategy: %s" % converge)
if self.pull:
- result.update(self.cmd_pull())
+ pull_output = self.cmd_pull()
+ result['changed'] = pull_output['changed']
+ result['actions'] += pull_output['actions']
if self.build:
- result.update(self.cmd_build())
+ build_output = self.cmd_build()
+ result['changed'] = build_output['changed']
+ result['actions'] += build_output['actions']
for service in self.project.services:
if not service_names or service.name in service_names:
plan = service.convergence_plan(strategy=converge)
if plan.action != 'noop':
result['changed'] = True
- if self.debug or self.check_mode:
- if not result['actions'].get(service.name):
- result['actions'][service.name] = dict()
- result['actions'][service.name][plan.action] = []
+ result_action = dict(service=service.name)
+ result_action[plan.action] = []
for container in plan.containers:
- result['actions'][service.name][plan.action].append(dict(
+ result_action[plan.action].append(dict(
id=container.id,
name=container.name,
short_id=container.short_id,
))
+ result['actions'].append(result_action)
if not self.check_mode and result['changed']:
+ _, fd_name = tempfile.mkstemp(prefix="ansible")
try:
- do_build = build_action_from_opts(up_options)
- self.log('Setting do_build to %s' % do_build)
- self.project.up(
- service_names=service_names,
- start_deps=start_deps,
- strategy=converge,
- do_build=do_build,
- detached=detached,
- remove_orphans=self.remove_orphans)
+ with stdout_redirector(fd_name):
+ do_build = build_action_from_opts(up_options)
+ self.log('Setting do_build to %s' % do_build)
+ self.project.up(
+ service_names=service_names,
+ start_deps=start_deps,
+ strategy=converge,
+ do_build=do_build,
+ detached=detached,
+ remove_orphans=self.remove_orphans,
+ timeout=self.timeout)
except Exception as exc:
- self.client.fail("Error bring %s up - %s" % (self.project.name, str(exc)))
+ full_stdout, last_line= get_stdout(fd_name)
+ self.client.module.fail_json(msg="Error starting project %s" % str(exc), module_stderr=last_line,
+ module_stdout=full_stdout)
+ else:
+ get_stdout(fd_name)
if self.stopped:
- result.update(self.cmd_stop(service_names))
+ stop_output = self.cmd_stop(service_names)
+ result['changed'] = stop_output['changed']
+ result['actions'] += stop_output['actions']
if self.restarted:
- result.update(self.cmd_restart(service_names))
+ restart_output = self.cmd_restart(service_names)
+ result['changed'] = restart_output['changed']
+ result['actions'] += restart_output['actions']
if self.scale:
- result.update(self.cmd_scale())
+ scale_output = self.cmd_scale()
+ result['changed'] = scale_output['changed']
+ result['actions'] += scale_output['actions']
for service in self.project.services:
result['ansible_facts'][service.name] = dict()
@@ -726,160 +785,199 @@ def cmd_up(self):
def cmd_pull(self):
result = dict(
changed=False,
- actions=dict(),
+ actions=[],
)
if not self.check_mode:
for service in self.project.get_services(self.services, include_deps=False):
+ if 'image' not in service.options:
+ continue
+
self.log('Pulling image for service %s' % service.name)
# store the existing image ID
- image = service.image()
- old_image_id = None
- if image and image.get('Id'):
- old_image_id = image['Id']
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
# pull the image
- service.pull(ignore_pull_failures=False)
+ try:
+ service.pull(ignore_pull_failures=False)
+ except Exception as exc:
+ self.client.fail("Error: pull failed with %s" % str(exc))
# store the new image ID
- image = service.image()
- new_image_id = None
- if image and image.get('Id'):
- new_image_id = image['Id']
+ new_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ new_image_id = image['Id']
+ except NoSuchImageError as exc:
+ self.client.fail("Error: service image lookup failed after pull - %s" % str(exc))
if new_image_id != old_image_id:
# if a new image was pulled
result['changed'] = True
- result['actions'][service.name] = dict()
- result['actions'][service.name]['pulled_image'] = dict(
- name=service.image_name,
- id=service.image()['Id']
- )
+ result['actions'].append(dict(
+ service=service.name,
+ pulled_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
return result
def cmd_build(self):
result = dict(
changed=False,
- actions=dict(),
+ actions=[]
)
if not self.check_mode:
for service in self.project.get_services(self.services, include_deps=False):
- self.log('Building image for service %s' % service.name)
if service.can_be_built():
+ self.log('Building image for service %s' % service.name)
# store the existing image ID
- image = service.image()
- old_image_id = None
- if image and image.get('Id'):
- old_image_id = image['Id']
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
# build the image
- new_image_id = service.build(pull=True, no_cache=self.nocache)
+ try:
+ new_image_id = service.build(pull=True, no_cache=self.nocache)
+ except Exception as exc:
+ self.client.fail("Error: build failed with %s" % str(exc))
if new_image_id not in old_image_id:
# if a new image was built
result['changed'] = True
- result['actions'][service.name] = dict()
- result['actions'][service.name]['built_image'] = dict(
- name=service.image_name,
- id=service.image()['Id']
- )
+ result['actions'].append(dict(
+ service=service.name,
+ built_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
return result
def cmd_down(self):
result = dict(
changed=False,
- actions=dict(),
+ actions=[]
)
-
for service in self.project.services:
containers = service.containers(stopped=True)
if len(containers):
result['changed'] = True
- if self.debug or self.check_mode:
- result['actions'][service.name] = dict()
- result['actions'][service.name]['deleted'] = [container.name for container in containers]
-
+ result['actions'].append(dict(
+ service=service.name,
+ deleted=[container.name for container in containers]
+ ))
if not self.check_mode and result['changed']:
image_type = image_type_from_opt('--rmi', self.remove_images)
try:
self.project.down(image_type, self.remove_volumes, self.remove_orphans)
except Exception as exc:
- self.client.fail("Error bringing %s down - %s" % (self.project.name, str(exc)))
-
+ self.client.fail("Error stopping project - %s" % str(exc))
return result
def cmd_stop(self, service_names):
result = dict(
changed=False,
- actions=dict()
+ actions=[]
)
for service in self.project.services:
if not service_names or service.name in service_names:
- result['actions'][service.name] = dict()
- result['actions'][service.name]['stop'] = []
+ service_res = dict(
+ service=service.name,
+ stop=[]
+ )
for container in service.containers(stopped=False):
result['changed'] = True
- if self.debug:
- result['actions'][service.name]['stop'].append(dict(
- id=container.id,
- name=container.name,
- short_id=container.short_id,
- ))
-
+ service_res['stop'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
if not self.check_mode and result['changed']:
+ _, fd_name = tempfile.mkstemp(prefix="ansible")
try:
- self.project.stop(service_names=service_names)
+ with stdout_redirector(fd_name):
+ self.project.stop(service_names=service_names, timeout=self.timeout)
except Exception as exc:
- self.client.fail("Error stopping services for %s - %s" % (self.project.name, str(exc)))
-
+ full_stdout, last_line = get_stdout(fd_name)
+ self.client.module.fail_json(msg="Error stopping project %s" % str(exc), module_stderr=last_line,
+ module_stdout=full_stdout)
+ else:
+ get_stdout(fd_name)
return result
def cmd_restart(self, service_names):
result = dict(
changed=False,
- actions=dict()
+ actions=[]
)
for service in self.project.services:
if not service_names or service.name in service_names:
- result['actions'][service.name] = dict()
- result['actions'][service.name]['restart'] = []
+ service_res = dict(
+ service=service.name,
+ restart=[]
+ )
for container in service.containers(stopped=True):
result['changed'] = True
- if self.debug or self.check_mode:
- result['actions'][service.name]['restart'].append(dict(
- id=container.id,
- name=container.name,
- short_id=container.short_id,
- ))
-
+ service_res['restart'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+
if not self.check_mode and result['changed']:
+ _, fd_name = tempfile.mkstemp(prefix="ansible")
try:
- self.project.restart(service_names=service_names)
+ with stdout_redirector(fd_name):
+ self.project.restart(service_names=service_names, timeout=self.timeout)
except Exception as exc:
- self.client.fail("Error restarting services for %s - %s" % (self.project.name, str(exc)))
-
+ full_stdout, last_line = get_stdout(fd_name)
+ self.client.module.fail_json(msg="Error restarting project %s" % str(exc), module_stderr=last_line,
+ module_stdout=full_stdout)
+ else:
+ get_stdout(fd_name)
return result
def cmd_scale(self):
result = dict(
changed=False,
- actions=dict()
+ actions=[]
)
-
for service in self.project.services:
if service.name in self.scale:
- result['actions'][service.name] = dict()
+ service_res = dict(
+ service=service.name,
+ scale=0
+ )
containers = service.containers(stopped=True)
if len(containers) != self.scale[service.name]:
result['changed'] = True
- if self.debug or self.check_mode:
- result['actions'][service.name]['scale'] = self.scale[service.name] - len(containers)
+ service_res['scale'] = self.scale[service.name] - len(containers)
if not self.check_mode:
try:
- service.scale(self.scale[service.name])
+ service.scale(int(self.scale[service.name]))
except Exception as exc:
self.client.fail("Error scaling %s - %s" % (service.name, str(exc)))
+ result['actions'].append(service_res)
return result
@@ -903,7 +1001,8 @@ def main():
dependencies=dict(type='bool', default=True),
pull=dict(type='bool', default=False),
nocache=dict(type='bool', default=False),
- debug=dict(type='bool', default=False)
+ debug=dict(type='bool', default=False),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT)
)
mutually_exclusive = [
diff --git a/cloud/google/gc_storage.py b/cloud/google/gc_storage.py
index 64222d47d82..6a5e9023d02 100644
--- a/cloud/google/gc_storage.py
+++ b/cloud/google/gc_storage.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: gc_storage
@@ -89,26 +93,49 @@
'''
EXAMPLES = '''
-# upload some content
-- gc_storage: bucket=mybucket object=key.txt src=/usr/local/myfile.txt mode=put permission=public-read
-
-# upload some headers
-- gc_storage: bucket=mybucket object=key.txt src=/usr/local/myfile.txt headers='{"Content-Encoding": "gzip"}'
-
-# download some content
-- gc_storage: bucket=mybucket object=key.txt dest=/usr/local/myfile.txt mode=get
-
-# Download an object as a string to use else where in your playbook
-- gc_storage: bucket=mybucket object=key.txt mode=get_str
-
-# Create an empty bucket
-- gc_storage: bucket=mybucket mode=create
-
-# Create a bucket with key as directory
-- gc_storage: bucket=mybucket object=/my/directory/path mode=create
-
-# Delete a bucket and all contents
-- gc_storage: bucket=mybucket mode=delete
+- name: Upload some content
+ gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ permission: public-read
+
+- name: Upload some headers
+ gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ headers: '{"Content-Encoding": "gzip"}'
+
+- name: Download some content
+ gc_storage:
+ bucket: mybucket
+ object: key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: Download an object as a string to use elsewhere in your playbook
+ gc_storage:
+ bucket: mybucket
+ object: key.txt
+ mode: get_str
+
+- name: Create an empty bucket
+ gc_storage:
+ bucket: mybucket
+ mode: create
+
+- name: Create a bucket with key as directory
+ gc_storage:
+ bucket: mybucket
+ object: /my/directory/path
+ mode: create
+
+- name: Delete a bucket and all contents
+ gc_storage:
+ bucket: mybucket
+ mode: delete
'''
import os
diff --git a/cloud/google/gce.py b/cloud/google/gce.py
index 064a79e2f84..802a7a1393c 100644
--- a/cloud/google/gce.py
+++ b/cloud/google/gce.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: gce
@@ -23,7 +27,7 @@
short_description: create or terminate GCE instances
description:
- Creates or terminates Google Compute Engine (GCE) instances. See
- U(https://cloud.google.com/products/compute-engine) for an overview.
+ U(https://cloud.google.com/compute) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
@@ -89,8 +93,17 @@
default: null
name:
description:
- - identifier when working with a single instance
+ - either a name of a single instance or when used with 'num_instances',
+ the base name of a cluster of nodes
+ required: false
+ aliases: ['base_name']
+ num_instances:
+ description:
+ - can be used with 'name', specifies
+ the number of nodes to provision using 'name'
+ as a base name
required: false
+ version_added: "2.3"
network:
description:
- name of the network, 'default' will be used if not specified
@@ -142,7 +155,7 @@
external_ip:
version_added: "1.9"
description:
- - type of external ip, ephemeral by default; alternatively, a list of fixed gce ips or ip names can be given (if there is not enough specified ip, 'ephemeral' will be used)
+      - type of external ip, ephemeral by default; alternatively, a list of fixed gce ips or ip names can be given (if there are not enough specified ips, 'ephemeral' will be used). Specify 'none' if no external ip is desired.
required: false
default: "ephemeral"
disk_auto_delete:
@@ -164,96 +177,116 @@
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
>= 0.20.0 if using preemptible option"
notes:
- - Either I(name) or I(instance_names) is required.
-author: "Eric Johnson (@erjohnso) "
+ - Either I(instance_names) or I(name) is required.
+ - JSON credentials strongly preferred.
+author: "Eric Johnson (@erjohnso) , Tom Melendez (@supertom) "
'''
EXAMPLES = '''
-# Basic provisioning example. Create a single Debian 7 instance in the
-# us-central1-a Zone of n1-standard-1 machine type.
-- local_action:
- module: gce
- name: test-instance
- zone: us-central1-a
- machine_type: n1-standard-1
- image: debian-7
-
-# Example using defaults and with metadata to create a single 'foo' instance
-- local_action:
- module: gce
- name: foo
- metadata: '{"db":"postgres", "group":"qa", "id":500}'
-
-
-# Launch instances from a control node, runs some tasks on the new instances,
-# and then terminate them
-# This example uses JSON credentials with the credentials_file parameter
-# rather than the deprecated pem_file option with PEM formatted credentials.
-
-- name: Create a sandbox instance
+# Basic provisioning example. Create a single Debian 8 instance in the
+# us-central1-a Zone of the n1-standard-1 machine type.
+# Create multiple instances by specifying multiple names, separated by
+# commas in the instance_names field
+# (e.g. my-test-instance1,my-test-instance2)
+ gce:
+ instance_names: my-test-instance1
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image: debian-8
+ state: present
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+
+# Create a single Debian 8 instance in the us-central1-a Zone
+# Use existing disks, custom network/subnetwork, set service account permissions
+# add tags and metadata.
+ gce:
+ instance_names: my-test-instance
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ state: present
+ metadata: '{"db":"postgres", "group":"qa", "id":500}'
+ tags:
+ - http-server
+ - my-other-tag
+ disks:
+ - name: disk-2
+ mode: READ_WRITE
+ - name: disk-3
+ mode: READ_ONLY
+ disk_auto_delete: false
+ network: foobar-network
+ subnetwork: foobar-subnetwork-1
+ preemptible: true
+ ip_forward: true
+ service_account_permissions:
+ - storage-full
+ - taskqueue
+ - bigquery
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+
+# Example Playbook
+- name: Compute Engine Instance Examples
hosts: localhost
vars:
- names: foo,bar
- machine_type: n1-standard-1
- image: debian-6
- zone: us-central1-a
- service_account_email: unique-email@developer.gserviceaccount.com
- credentials_file: /path/to/json_file
- project_id: project-id
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
tasks:
- - name: Launch instances
- local_action: gce instance_names={{names}} machine_type={{machine_type}}
- image={{image}} zone={{zone}}
- service_account_email={{ service_account_email }}
- credentials_file={{ credentials_file }}
- project_id={{ project_id }}
+ - name: create multiple instances
+ # Basic provisioning example. Create multiple Debian 8 instances in the
+ # us-central1-a Zone of n1-standard-1 machine type.
+ gce:
+ instance_names: test1,test2,test3
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image: debian-8
+ state: present
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ metadata : '{ "startup-script" : "apt-get update" }'
register: gce
- - name: Wait for SSH to come up
- local_action: wait_for host={{item.public_ip}} port=22 delay=10
- timeout=60 state=started
- with_items: {{gce.instance_data}}
-
-- name: Configure instance(s)
- hosts: launched
- become: True
- roles:
- - my_awesome_role
- - my_awesome_tasks
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- tasks:
- - name: Terminate instances that were previously launched
- local_action:
- module: gce
- state: 'absent'
- instance_names: {{gce.instance_names}}
-
-# The deprecated PEM file credentials can be used as follows
-- name: Create a sandbox instance with PEM credentials
- hosts: localhost
- vars:
- names: foo,bar
- machine_type: n1-standard-1
- image: debian-6
- zone: us-central1-a
- service_account_email: unique-email@developer.gserviceaccount.com
- pem_file: /path/to/pem_file
- project_id: project-id
- tasks:
- - name: Launch instances
- local_action: gce instance_names={{names}} machine_type={{machine_type}}
- image={{image}} zone={{zone}}
- service_account_email={{ service_account_email }}
- pem_file={{ pem_file }}
- project_id={{ project_id }}
- register: gce
- - name: Wait for SSH to come up
- local_action: wait_for host={{item.public_ip}} port=22 delay=10
- timeout=60 state=started
- with_items: {{gce.instance_data}}
+ - name: Save host data
+ add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: gce_instances_ips
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for SSH for instances
+ wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 30
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Configure Hosts
+ hosts: gce_instances_ips
+ become: yes
+ become_method: sudo
+ roles:
+ - my-role-one
+ - my-role-two
+ tags:
+ - config
+
+ - name: delete test-instances
+ # Basic termination of instance.
+ gce:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ instance_names: "{{ gce.instance_names }}"
+ zone: us-central1-a
+ state: absent
+ tags:
+ - delete
'''
import socket
@@ -324,7 +357,7 @@ def get_instance_info(inst):
})
-def create_instances(module, gce, instance_names):
+def create_instances(module, gce, instance_names, number):
"""Creates new instances. Attributes other than instance_names are picked
up from 'module'
@@ -422,7 +455,7 @@ def create_instances(module, gce, instance_names):
bad_perms = []
if service_account_permissions:
for perm in service_account_permissions:
- if perm not in gce.SA_SCOPES_MAP.keys():
+ if perm not in gce.SA_SCOPES_MAP:
bad_perms.append(perm)
if len(bad_perms) > 0:
module.fail_json(msg='bad permissions: %s' % str(bad_perms))
@@ -434,40 +467,62 @@ def create_instances(module, gce, instance_names):
module.fail_json(msg='Missing required create instance variable',
changed=False)
- for name in instance_names:
- pd = None
- if lc_disks:
- pd = lc_disks[0]
- elif persistent_boot_disk:
+ gce_args = dict(
+ location=lc_zone,
+ ex_network=network, ex_tags=tags, ex_metadata=metadata,
+ ex_can_ip_forward=ip_forward,
+ external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
+ ex_service_accounts=ex_sa_perms
+ )
+ if preemptible is not None:
+ gce_args['ex_preemptible'] = preemptible
+ if subnetwork is not None:
+ gce_args['ex_subnetwork'] = subnetwork
+
+ if isinstance(instance_names, str) and not number:
+ instance_names = [instance_names]
+
+ if isinstance(instance_names, str) and number:
+ instance_responses = gce.ex_create_multiple_nodes(instance_names, lc_machine_type,
+ lc_image(), number, **gce_args)
+ for resp in instance_responses:
+ n = resp
+ if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode):
+ try:
+ n = gce.ex_get_node(n.name, lc_zone)
+ except ResourceNotFoundError:
+ pass
+ else:
+ # Assure that at least one node has been created to set changed=True
+ changed = True
+ new_instances.append(n)
+ else:
+ for instance in instance_names:
+ pd = None
+ if lc_disks:
+ pd = lc_disks[0]
+ elif persistent_boot_disk:
+ try:
+ pd = gce.ex_get_volume("%s" % instance, lc_zone)
+ except ResourceNotFoundError:
+ pd = gce.create_volume(None, "%s" % instance, image=lc_image())
+ gce_args['ex_boot_disk'] = pd
+
+ inst = None
try:
- pd = gce.ex_get_volume("%s" % name, lc_zone)
+ inst = gce.ex_get_node(instance, lc_zone)
except ResourceNotFoundError:
- pd = gce.create_volume(None, "%s" % name, image=lc_image())
-
- gce_args = dict(
- location=lc_zone,
- ex_network=network, ex_tags=tags, ex_metadata=metadata,
- ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
- external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
- ex_service_accounts=ex_sa_perms
- )
- if preemptible is not None:
- gce_args['ex_preemptible'] = preemptible
- if subnetwork is not None:
- gce_args['ex_subnetwork'] = subnetwork
-
- inst = None
- try:
- inst = gce.ex_get_node(name, lc_zone)
- except ResourceNotFoundError:
- inst = gce.create_node(
- name, lc_machine_type, lc_image(), **gce_args
- )
- changed = True
- except GoogleBaseError as e:
- module.fail_json(msg='Unexpected error attempting to create ' +
- 'instance %s, error: %s' % (name, e.value))
+ inst = gce.create_node(
+ instance, lc_machine_type, lc_image(), **gce_args
+ )
+ changed = True
+ except GoogleBaseError as e:
+ module.fail_json(msg='Unexpected error attempting to create ' +
+ 'instance %s, error: %s' % (instance, e.value))
+ if inst:
+ new_instances.append(inst)
+ for inst in new_instances:
for i, lc_disk in enumerate(lc_disks):
# Check whether the disk is already attached
if (len(inst.extra['disks']) > i):
@@ -490,9 +545,6 @@ def create_instances(module, gce, instance_names):
inst.extra['disks'].append(
{'source': lc_disk.extra['selfLink'], 'index': i})
- if inst:
- new_instances.append(inst)
-
instance_names = []
instance_json_data = []
for inst in new_instances:
@@ -502,7 +554,7 @@ def create_instances(module, gce, instance_names):
return (changed, instance_json_data, instance_names)
-def change_instance_state(module, gce, instance_names, zone_name, state):
+def change_instance_state(module, gce, instance_names, number, zone_name, state):
"""Changes the state of a list of instances. For example,
change from started to stopped, or started to absent.
@@ -516,31 +568,46 @@ def change_instance_state(module, gce, instance_names, zone_name, state):
"""
changed = False
- changed_instance_names = []
- for name in instance_names:
+ nodes = []
+ state_instance_names = []
+
+ if isinstance(instance_names, str) and number:
+ node_names = ['%s-%03d' % (instance_names, i) for i in range(number)]
+ elif isinstance(instance_names, str) and not number:
+ node_names = [instance_names]
+ else:
+ node_names = instance_names
+
+ for name in node_names:
inst = None
try:
inst = gce.ex_get_node(name, zone_name)
except ResourceNotFoundError:
- pass
+ state_instance_names.append(name)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
- if inst and state in ['absent', 'deleted']:
- gce.destroy_node(inst)
- changed_instance_names.append(inst.name)
- changed = True
- elif inst and state == 'started' and \
- inst.state == libcloud.compute.types.NodeState.STOPPED:
- gce.ex_start_node(inst)
- changed_instance_names.append(inst.name)
- changed = True
- elif inst and state in ['stopped', 'terminated'] and \
- inst.state == libcloud.compute.types.NodeState.RUNNING:
- gce.ex_stop_node(inst)
- changed_instance_names.append(inst.name)
- changed = True
-
- return (changed, changed_instance_names)
+ else:
+ nodes.append(inst)
+ state_instance_names.append(name)
+
+ if state in ['absent', 'deleted'] and number:
+ changed_nodes = gce.ex_destroy_multiple_nodes(nodes) or [False]
+ changed = reduce(lambda x, y: x or y, changed_nodes)
+ else:
+ for node in nodes:
+ if state in ['absent', 'deleted']:
+ gce.destroy_node(node)
+ changed = True
+ elif state == 'started' and \
+ node.state == libcloud.compute.types.NodeState.STOPPED:
+ gce.ex_start_node(node)
+ changed = True
+ elif state in ['stopped', 'terminated'] and \
+ node.state == libcloud.compute.types.NodeState.RUNNING:
+ gce.ex_stop_node(node)
+ changed = True
+
+ return (changed, state_instance_names)
def main():
module = AnsibleModule(
@@ -549,7 +616,8 @@ def main():
instance_names = dict(),
machine_type = dict(default='n1-standard-1'),
metadata = dict(),
- name = dict(),
+ name = dict(aliases=['base_name']),
+ num_instances = dict(type='int'),
network = dict(default='default'),
subnetwork = dict(),
persistent_boot_disk = dict(type='bool', default=False),
@@ -568,7 +636,8 @@ def main():
external_ip=dict(default='ephemeral'),
disk_auto_delete = dict(type='bool', default=True),
preemptible = dict(type='bool', default=None),
- )
+ ),
+ mutually_exclusive=[('instance_names', 'name')]
)
if not HAS_PYTHON26:
@@ -583,6 +652,7 @@ def main():
machine_type = module.params.get('machine_type')
metadata = module.params.get('metadata')
name = module.params.get('name')
+ number = module.params.get('num_instances')
network = module.params.get('network')
subnetwork = module.params.get('subnetwork')
persistent_boot_disk = module.params.get('persistent_boot_disk')
@@ -593,13 +663,13 @@ def main():
preemptible = module.params.get('preemptible')
changed = False
- inames = []
+ inames = None
if isinstance(instance_names, list):
inames = instance_names
elif isinstance(instance_names, str):
inames = instance_names.split(',')
if name:
- inames.append(name)
+ inames = name
if not inames:
module.fail_json(msg='Must specify a "name" or "instance_names"',
changed=False)
@@ -617,20 +687,20 @@ def main():
json_output = {'zone': zone}
if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']:
json_output['state'] = state
- (changed, changed_instance_names) = change_instance_state(
- module, gce, inames, zone, state)
+ (changed, state_instance_names) = change_instance_state(
+ module, gce, inames, number, zone, state)
# based on what user specified, return the same variable, although
# value could be different if an instance could not be destroyed
- if instance_names:
- json_output['instance_names'] = changed_instance_names
+ if instance_names or name and number:
+ json_output['instance_names'] = state_instance_names
elif name:
json_output['name'] = name
elif state in ['active', 'present']:
json_output['state'] = 'present'
(changed, instance_data, instance_name_list) = create_instances(
- module, gce, inames)
+ module, gce, inames, number)
json_output['instance_data'] = instance_data
if instance_names:
json_output['instance_names'] = instance_name_list
diff --git a/cloud/google/gce_lb.py b/cloud/google/gce_lb.py
index 32233d4b36b..dc31bb3b421 100644
--- a/cloud/google/gce_lb.py
+++ b/cloud/google/gce_lb.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: gce_lb
diff --git a/cloud/google/gce_mig.py b/cloud/google/gce_mig.py
new file mode 100644
index 00000000000..024055dff30
--- /dev/null
+++ b/cloud/google/gce_mig.py
@@ -0,0 +1,908 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: gce_mig
+version_added: "2.2"
+short_description: Create, Update or Destroy a Managed Instance Group (MIG).
+description:
+ - Create, Update or Destroy a Managed Instance Group (MIG). See
+ U(https://cloud.google.com/compute/docs/instance-groups) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 1.2.0"
+notes:
+ - Resizing and Recreating VM are also supported.
+ - An existing instance template is required in order to create a
+ Managed Instance Group.
+author:
+ - "Tom Melendez (@supertom) "
+options:
+ name:
+ description:
+ - Name of the Managed Instance Group.
+ required: true
+ template:
+ description:
+ - Instance Template to be used in creating the VMs. See
+ U(https://cloud.google.com/compute/docs/instance-templates) to learn more
+ about Instance Templates. Required for creating MIGs.
+ required: false
+ size:
+ description:
+ - Size of Managed Instance Group. If MIG already exists, it will be
+ resized to the number provided here. Required for creating MIGs.
+ required: false
+ service_account_email:
+ description:
+ - service account email
+ required: false
+ default: null
+ credentials_file:
+ description:
+ - Path to the JSON file associated with the service account email
+ default: null
+ required: false
+ project_id:
+ description:
+ - GCE project ID
+ required: false
+ default: null
+ state:
+ description:
+ - desired state of the resource
+ required: false
+ default: "present"
+ choices: ["absent", "present"]
+ zone:
+ description:
+ - The GCE zone to use for this Managed Instance Group.
+ required: true
+ autoscaling:
+ description:
+ - A dictionary of configuration for the autoscaler. 'enabled (bool)', 'name (str)'
+ and policy.max_instances (int) are required fields if autoscaling is used. See
+ U(https://cloud.google.com/compute/docs/reference/beta/autoscalers) for more information
+ on Autoscaling.
+ required: false
+ default: null
+ named_ports:
+ version_added: "2.3"
+ description:
+ - Define named ports that backend services can forward data to. Format is a list of
+ name:port dictionaries.
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+# Following playbook creates, rebuilds instances, resizes and then deletes a MIG.
+# Notes:
+# - Two valid Instance Templates must exist in your GCE project in order to run
+# this playbook. Change the fields to match the templates used in your
+# project.
+# - The use of the 'pause' module is not required, it is just for convenience.
+- name: Managed Instance Group Example
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Create MIG
+ gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 1
+ template: my-instance-template-1
+ named_ports:
+ - name: http
+ port: 80
+ - name: foobar
+ port: 82
+
+ - name: Pause for 30 seconds
+ pause:
+ seconds: 30
+
+ - name: Recreate MIG Instances with Instance Template change.
+ gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ template: my-instance-template-2-small
+ recreate_instances: yes
+
+ - name: Pause for 30 seconds
+ pause:
+ seconds: 30
+
+ - name: Resize MIG
+ gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+
+ - name: Update MIG with Autoscaler
+ gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+ template: my-instance-template-2-small
+ recreate_instances: yes
+ autoscaling:
+ enabled: yes
+ name: my-autoscaler
+ policy:
+ min_instances: 2
+ max_instances: 5
+ cool_down_period: 37
+ cpu_utilization:
+ target: .39
+ load_balancing_utilization:
+ target: 0.4
+
+ - name: Pause for 30 seconds
+ pause:
+ seconds: 30
+
+ - name: Delete MIG
+ gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: absent
+ autoscaling:
+ enabled: no
+ name: my-autoscaler
+'''
+RETURN = '''
+zone:
+ description: Zone in which to launch MIG.
+ returned: always
+ type: string
+ sample: "us-central1-b"
+
+template:
+ description: Instance Template to use for VMs. Must exist prior to using with MIG.
+ returned: changed
+ type: string
+ sample: "my-instance-template"
+
+name:
+ description: Name of the Managed Instance Group.
+ returned: changed
+ type: string
+ sample: "my-managed-instance-group"
+
+named_ports:
+ description: list of named ports acted upon
+ returned: when named_ports are initially set or updated
+ type: list
+ sample: [{ "name": "http", "port": 80 }, { "name": "foo", "port": 82 }]
+
+size:
+ description: Number of VMs in Managed Instance Group.
+ returned: changed
+ type: integer
+ sample: 4
+
+created_instances:
+ description: Names of instances created.
+ returned: When instances are created.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+deleted_instances:
+ description: Names of instances deleted.
+ returned: When instances are deleted.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_created_instances:
+ description: Names of instances created during resizing.
+ returned: When a resize results in the creation of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_deleted_instances:
+ description: Names of instances deleted during resizing.
+ returned: When a resize results in the deletion of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+recreated_instances:
+ description: Names of instances recreated.
+ returned: When instances are recreated.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+created_autoscaler:
+ description: True if Autoscaler was attempted and created. False otherwise.
+ returned: When the creation of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+updated_autoscaler:
+ description: True if an Autoscaler update was attempted and succeeded.
+ False returned if update failed.
+ returned: When the update of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+deleted_autoscaler:
+ description: True if an Autoscaler delete attempted and succeeded.
+ False returned if delete failed.
+ returned: When the delete of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+set_named_ports:
+ description: True if the named_ports have been set
+ returned: named_ports have been set
+ type: bool
+ sample: true
+
+updated_named_ports:
+ description: True if the named_ports have been updated
+ returned: named_ports have been updated
+ type: bool
+ sample: true
+'''
+
+import socket
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+
+def _check_params(params, field_list):
+ """
+ Helper to validate params.
+
+ Use this in function definitions if they require specific fields
+ to be present.
+
+ :param params: structure that contains the fields
+ :type params: ``dict``
+
+ :param field_list: list of dict representing the fields
+ [{'name': str, 'required': True/False', 'type': cls}]
+ :type field_list: ``list`` of ``dict``
+
+ :return: Tuple of (True, '') if all fields are valid, otherwise
+ (False, error message).
+ """
+ for d in field_list:
+ if not d['name'] in params:
+ if d['required'] is True:
+ return (False, "%s is required and must be of type: %s" %
+ (d['name'], str(d['type'])))
+ else:
+ if not isinstance(params[d['name']], d['type']):
+ return (False,
+ "%s must be of type: %s" % (d['name'], str(d['type'])))
+
+ return (True, '')
+
+
+def _validate_autoscaling_params(params):
+ """
+ Validate that the minimum configuration is present for autoscaling.
+
+ :param params: Ansible dictionary containing autoscaling configuration
+ It is expected that autoscaling config will be found at the
+ key 'autoscaling'.
+ :type params: ``dict``
+
+ :return: Tuple containing a boolean and a string. True if autoscaler
+ is valid, False otherwise, plus str for message.
+ :rtype: ``(``bool``, ``str``)``
+ """
+ if not params['autoscaling']:
+ # It's optional, so if not set at all, it's valid.
+ return (True, '')
+ if not isinstance(params['autoscaling'], dict):
+ return (False,
+ 'autoscaling: configuration expected to be a dictionary.')
+
+ # check first-level required fields
+ as_req_fields = [
+ {'name': 'name', 'required': True, 'type': str},
+ {'name': 'enabled', 'required': True, 'type': bool},
+ {'name': 'policy', 'required': True, 'type': dict}
+ ] # yapf: disable
+
+ (as_req_valid, as_req_msg) = _check_params(params['autoscaling'],
+ as_req_fields)
+ if not as_req_valid:
+ return (False, as_req_msg)
+
+ # check policy configuration
+ as_policy_fields = [
+ {'name': 'max_instances', 'required': True, 'type': int},
+ {'name': 'min_instances', 'required': False, 'type': int},
+ {'name': 'cool_down_period', 'required': False, 'type': int}
+ ] # yapf: disable
+
+ (as_policy_valid, as_policy_msg) = _check_params(
+ params['autoscaling']['policy'], as_policy_fields)
+ if not as_policy_valid:
+ return (False, as_policy_msg)
+
+ # TODO(supertom): check utilization fields
+
+ return (True, '')
+
+
+def _validate_named_port_params(params):
+ """
+ Validate the named ports parameters
+
+ :param params: Ansible dictionary containing named_ports configuration
+ It is expected that autoscaling config will be found at the
+ key 'named_ports'. That key should contain a list of
+ {name : port} dictionaries.
+ :type params: ``dict``
+
+ :return: Tuple containing a boolean and a string. True if params
+ are valid, False otherwise, plus str for message.
+ :rtype: ``(``bool``, ``str``)``
+ """
+ if not params['named_ports']:
+ # It's optional, so if not set at all, it's valid.
+ return (True, '')
+ if not isinstance(params['named_ports'], list):
+ return (False, 'named_ports: expected list of name:port dictionaries.')
+ req_fields = [
+ {'name': 'name', 'required': True, 'type': str},
+ {'name': 'port', 'required': True, 'type': int}
+ ] # yapf: disable
+
+ for np in params['named_ports']:
+ (valid_named_ports, np_msg) = _check_params(np, req_fields)
+ if not valid_named_ports:
+ return (False, np_msg)
+
+ return (True, '')
+
+
+def _get_instance_list(mig, field='name', filter_list=['NONE']):
+ """
+ Helper to grab field from instances response.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param field: Field name in list_managed_instances response. Defaults
+ to 'name'.
+ :type field: ``str``
+
+ :param filter_list: list of 'currentAction' strings to filter on. Only
+ items that match a currentAction in this list will
+ be returned. Default is "['NONE']".
+ :type filter_list: ``list`` of ``str``
+
+ :return: List of strings from list_managed_instances response.
+ :rtype: ``list``
+ """
+ return [x[field] for x in mig.list_managed_instances()
+ if x['currentAction'] in filter_list]
+
+
+def _gen_gce_as_policy(as_params):
+ """
+ Take Autoscaler params and generate GCE-compatible policy.
+
+ :param as_params: Dictionary in Ansible-playbook format
+ containing policy arguments.
+ :type as_params: ``dict``
+
+ :return: GCE-compatible policy dictionary
+ :rtype: ``dict``
+ """
+ asp_data = {}
+ asp_data['maxNumReplicas'] = as_params['max_instances']
+ if 'min_instances' in as_params:
+ asp_data['minNumReplicas'] = as_params['min_instances']
+ if 'cool_down_period' in as_params:
+ asp_data['coolDownPeriodSec'] = as_params['cool_down_period']
+ if 'cpu_utilization' in as_params and 'target' in as_params[
+ 'cpu_utilization']:
+ asp_data['cpuUtilization'] = {'utilizationTarget':
+ as_params['cpu_utilization']['target']}
+ if 'load_balancing_utilization' in as_params and 'target' in as_params[
+ 'load_balancing_utilization']:
+ asp_data['loadBalancingUtilization'] = {
+ 'utilizationTarget':
+ as_params['load_balancing_utilization']['target']
+ }
+
+ return asp_data
+
+
+def create_autoscaler(gce, mig, params):
+ """
+ Create a new Autoscaler for a MIG.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param mig: An initialized GCEInstanceGroupManager.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param params: Dictionary of autoscaling parameters.
+ :type params: ``dict``
+
+ :return: True if the Autoscaler was created, False otherwise.
+ :rtype: ``bool``
+ """
+ changed = False
+ as_policy = _gen_gce_as_policy(params['policy'])
+ autoscaler = gce.ex_create_autoscaler(name=params['name'], zone=mig.zone,
+ instance_group=mig, policy=as_policy)
+ if autoscaler:
+ changed = True
+ return changed
+
+
+def update_autoscaler(gce, autoscaler, params):
+ """
+ Update an Autoscaler.
+
+ Takes an existing Autoscaler object, and updates it with
+ the supplied params before calling libcloud's update method.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param autoscaler: An initialized GCEAutoscaler.
+ :type autoscaler: :class: `GCEAutoscaler`
+
+ :param params: Dictionary of autoscaling parameters.
+ :type params: ``dict``
+
+ :return: True if changes, False otherwise.
+ :rtype: ``bool``
+ """
+ as_policy = _gen_gce_as_policy(params['policy'])
+ if autoscaler.policy != as_policy:
+ autoscaler.policy = as_policy
+ autoscaler = gce.ex_update_autoscaler(autoscaler)
+ if autoscaler:
+ return True
+ return False
+
+
+def delete_autoscaler(autoscaler):
+ """
+ Delete an Autoscaler. Does not affect MIG.
+
+ :param autoscaler: Autoscaler object from Libcloud.
+ :type autoscaler: :class: `GCEAutoscaler`
+
+ :return: True if the Autoscaler was destroyed, False otherwise.
+ :rtype: ``bool``
+ """
+ changed = False
+ if autoscaler.destroy():
+ changed = True
+ return changed
+
+
+def get_autoscaler(gce, name, zone):
+ """
+ Get an Autoscaler from GCE.
+
+ If the Autoscaler is not found, None is returned.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Autoscaler.
+ :type name: ``str``
+
+ :param zone: Zone that the Autoscaler is located in.
+ :type zone: ``str``
+
+ :return: A GCEAutoscaler object or None.
+ :rtype: :class: `GCEAutoscaler` or None
+ """
+ try:
+ # Does the Autoscaler already exist?
+ return gce.ex_get_autoscaler(name, zone)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def create_mig(gce, params):
+ """
+ Create a new Managed Instance Group.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+
+ changed = False
+ return_data = []
+ actions_filter = ['CREATING']
+
+ mig = gce.ex_create_instancegroupmanager(
+ name=params['name'], size=params['size'], template=params['template'],
+ zone=params['zone'])
+
+ if mig:
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def delete_mig(mig):
+ """
+ Delete a Managed Instance Group. All VMs in that MIG are also deleted.
+
+ :param mig: Managed Instance Group Object from Libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['NONE', 'CREATING', 'RECREATING', 'DELETING',
+ 'ABANDONING', 'RESTARTING', 'REFRESHING']
+ instance_names = _get_instance_list(mig, filter_list=actions_filter)
+ if mig.destroy():
+ changed = True
+ return_data = instance_names
+
+ return (changed, return_data)
+
+
+def recreate_instances_in_mig(mig):
+ """
+ Recreate the instances for a Managed Instance Group.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['RECREATING']
+
+ if mig.recreate_instances():
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def resize_mig(mig, size):
+ """
+ Resize a Managed Instance Group.
+
+ Based on the size provided, GCE will automatically create and delete
+ VMs as needed.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['CREATING', 'DELETING']
+
+ if mig.resize(size):
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def get_mig(gce, name, zone):
+ """
+ Get a Managed Instance Group from GCE.
+
+ If the MIG is not found, None is returned.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Managed Instance Group.
+ :type name: ``str``
+
+ :param zone: Zone that the Managed Instance Group is located in.
+ :type zone: ``str``
+
+ :return: A GCEInstanceGroupManager object or None.
+ :rtype: :class: `GCEInstanceGroupManager` or None
+ """
+ try:
+ # Does the MIG already exist?
+ return gce.ex_get_instancegroupmanager(name=name, zone=zone)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def update_named_ports(mig, named_ports):
+ """
+ Set the named ports on a Managed Instance Group.
+
+ Sort the existing named ports and new. If different, update.
+ This also implicitly allows for the removal of named_ports.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param named_ports: list of dictionaries in the format of {'name': port}
+ :type named_ports: ``list`` of ``dict``
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ changed = False
+ existing_ports = []
+ new_ports = []
+ if hasattr(mig.instance_group, 'named_ports'):
+ existing_ports = sorted(mig.instance_group.named_ports,
+ key=lambda x: x['name'])
+ if named_ports is not None:
+ new_ports = sorted(named_ports, key=lambda x: x['name'])
+
+ if existing_ports != new_ports:
+ if mig.instance_group.set_named_ports(named_ports):
+ changed = True
+
+ return changed
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ name=dict(required=True),
+ template=dict(),
+ recreate_instances=dict(type='bool', default=False),
+ # Do not set a default size here. For Create and some update
+ # operations, it is required and should be explicitly set.
+ # Below, we set it to the existing value if it has not been set.
+ size=dict(type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ zone=dict(required=True),
+ autoscaling=dict(type='dict', default=None),
+ named_ports=dict(type='list', default=None),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.')
+
+ gce = gce_connect(module)
+ if not hasattr(gce, 'ex_create_instancegroupmanager'):
+ module.fail_json(
+ msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.',
+ changed=False)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['zone'] = module.params.get('zone')
+ params['name'] = module.params.get('name')
+ params['size'] = module.params.get('size')
+ params['template'] = module.params.get('template')
+ params['recreate_instances'] = module.params.get('recreate_instances')
+ params['autoscaling'] = module.params.get('autoscaling', None)
+ params['named_ports'] = module.params.get('named_ports', None)
+
+ (valid_autoscaling, as_msg) = _validate_autoscaling_params(params)
+ if not valid_autoscaling:
+ module.fail_json(msg=as_msg, changed=False)
+
+ if params['named_ports'] is not None and not hasattr(
+ gce, 'ex_instancegroup_set_named_ports'):
+ module.fail_json(
+ msg="Apache Libcloud 1.3.0+ is required to use 'named_ports' option",
+ changed=False)
+
+ (valid_named_ports, np_msg) = _validate_named_port_params(params)
+ if not valid_named_ports:
+ module.fail_json(msg=np_msg, changed=False)
+
+ changed = False
+ json_output = {'state': params['state'], 'zone': params['zone']}
+ mig = get_mig(gce, params['name'], params['zone'])
+
+ if not mig:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown managed instance group: %s" %
+ (params['name']))
+ else:
+ # Create MIG
+ req_create_fields = [
+ {'name': 'template', 'required': True, 'type': str},
+ {'name': 'size', 'required': True, 'type': int}
+ ] # yapf: disable
+
+ (valid_create_fields, valid_create_msg) = _check_params(
+ params, req_create_fields)
+ if not valid_create_fields:
+ module.fail_json(msg=valid_create_msg, changed=False)
+
+ (changed, json_output['created_instances']) = create_mig(gce,
+ params)
+ if params['autoscaling'] and params['autoscaling'][
+ 'enabled'] is True:
+ # Fetch newly-created MIG and create Autoscaler for it.
+ mig = get_mig(gce, params['name'], params['zone'])
+ if not mig:
+ module.fail_json(
+ msg='Unable to fetch created MIG %s to create \
+ autoscaler in zone: %s' % (
+ params['name'], params['zone']), changed=False)
+
+ if not create_autoscaler(gce, mig, params['autoscaling']):
+ module.fail_json(
+ msg='Unable to fetch MIG %s to create autoscaler \
+ in zone: %s' % (params['name'], params['zone']),
+ changed=False)
+
+ json_output['created_autoscaler'] = True
+ # Add named ports if available
+ if params['named_ports']:
+ mig = get_mig(gce, params['name'], params['zone'])
+ if not mig:
+ module.fail_json(
+ msg='Unable to fetch created MIG %s to create \
+ autoscaler in zone: %s' % (
+ params['name'], params['zone']), changed=False)
+ json_output['set_named_ports'] = update_named_ports(
+ mig, params['named_ports'])
+ if json_output['set_named_ports']:
+ json_output['named_ports'] = params['named_ports']
+
+ elif params['state'] == 'absent':
+ # Delete MIG
+
+ # First, check and remove the autoscaler, if present.
+ # Note: multiple autoscalers can be associated to a single MIG. We
+ # only handle the one that is named, but we might want to think about this.
+ if params['autoscaling']:
+ autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+ params['zone'])
+ if not autoscaler:
+ module.fail_json(msg='Unable to fetch autoscaler %s to delete \
+ in zone: %s' % (params['autoscaling']['name'], params['zone']),
+ changed=False)
+
+ changed = delete_autoscaler(autoscaler)
+ json_output['deleted_autoscaler'] = changed
+
+ # Now, delete the MIG.
+ (changed, json_output['deleted_instances']) = delete_mig(mig)
+
+ else:
+ # Update MIG
+
+ # If we're going to update a MIG, we need a size and template values.
+ # If not specified, we use the values from the existing MIG.
+ if not params['size']:
+ params['size'] = mig.size
+
+ if not params['template']:
+ params['template'] = mig.template.name
+
+ if params['template'] != mig.template.name:
+ # Update Instance Template.
+ new_template = gce.ex_get_instancetemplate(params['template'])
+ mig.set_instancetemplate(new_template)
+ json_output['updated_instancetemplate'] = True
+ changed = True
+ if params['recreate_instances'] is True:
+ # Recreate Instances.
+ (changed, json_output['recreated_instances']
+ ) = recreate_instances_in_mig(mig)
+
+ if params['size'] != mig.size:
+ # Resize MIG.
+ keystr = 'created' if params['size'] > mig.size else 'deleted'
+ (changed, json_output['resize_%s_instances' %
+ (keystr)]) = resize_mig(mig, params['size'])
+
+ # Update Autoscaler
+ if params['autoscaling']:
+ autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+ params['zone'])
+ if not autoscaler:
+ # Try to create autoscaler.
+ # Note: this isn't perfect, if the autoscaler name has changed
+ # we wouldn't know that here.
+ if not create_autoscaler(gce, mig, params['autoscaling']):
+ module.fail_json(
+ msg='Unable to create autoscaler %s for existing MIG %s\
+ in zone: %s' % (params['autoscaling']['name'],
+ params['name'], params['zone']),
+ changed=False)
+ json_output['created_autoscaler'] = True
+ changed = True
+ else:
+ if params['autoscaling']['enabled'] is False:
+ # Delete autoscaler
+ changed = delete_autoscaler(autoscaler)
+ json_output['delete_autoscaler'] = changed
+ else:
+ # Update policy, etc.
+ changed = update_autoscaler(gce, autoscaler,
+ params['autoscaling'])
+ json_output['updated_autoscaler'] = changed
+ named_ports = params['named_ports'] or []
+ json_output['updated_named_ports'] = update_named_ports(mig,
+ named_ports)
+ if json_output['updated_named_ports']:
+ json_output['named_ports'] = named_ports
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.gce import *
+if __name__ == '__main__':
+ main()
diff --git a/cloud/google/gce_net.py b/cloud/google/gce_net.py
index 128fdb41291..aec0a294271 100644
--- a/cloud/google/gce_net.py
+++ b/cloud/google/gce_net.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: gce_net
diff --git a/cloud/google/gce_pd.py b/cloud/google/gce_pd.py
index f71442b16a1..a645806712b 100644
--- a/cloud/google/gce_pd.py
+++ b/cloud/google/gce_pd.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: gce_pd
diff --git a/cloud/linode/linode.py b/cloud/linode/linode.py
index 022dc231bf4..7de16bbb2b9 100644
--- a/cloud/linode/linode.py
+++ b/cloud/linode/linode.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: linode
@@ -443,14 +447,14 @@ def main():
state = dict(default='present', choices=['active', 'present', 'started',
'deleted', 'absent', 'stopped',
'restarted']),
- api_key = dict(),
+ api_key = dict(no_log=True),
name = dict(type='str'),
plan = dict(type='int'),
distribution = dict(type='int'),
datacenter = dict(type='int'),
linode_id = dict(type='int', aliases=['lid']),
payment_term = dict(type='int', default=1, choices=[1, 12, 24]),
- password = dict(type='str'),
+ password = dict(type='str', no_log=True),
ssh_pub_key = dict(type='str'),
swap = dict(type='int', default=512),
wait = dict(type='bool', default=True),
diff --git a/cloud/openstack/README.md b/cloud/openstack/README.md
index 4a872b11954..36cdcd383fe 100644
--- a/cloud/openstack/README.md
+++ b/cloud/openstack/README.md
@@ -54,3 +54,11 @@ Libraries
users as a primary audience, they are for intra-server communication. The
python-openstacksdk is the future there, and shade will migrate to it when
its ready in a manner that is not noticable to ansible users.
+
+Testing
+-------
+
+* Integration testing is currently done in OpenStack's CI system in
+ http://git.openstack.org/cgit/openstack-infra/shade/tree/shade/tests/ansible
+* Testing in shade produces an obvious chicken-and-egg scenario. Work is under
+ way to trigger from and report on PRs directly.
diff --git a/cloud/openstack/_glance_image.py b/cloud/openstack/_glance_image.py
index 0db7aa2c5f0..a97255241ae 100644
--- a/cloud/openstack/_glance_image.py
+++ b/cloud/openstack/_glance_image.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see .
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: glance_image
@@ -44,7 +48,7 @@
description:
- The keystone url for authentication
required: false
- default: 'http://127.0.0.1:35357/v2.0/'
+ default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
@@ -120,15 +124,16 @@
'''
EXAMPLES = '''
-# Upload an image from an HTTP URL
-- glance_image: login_username=admin
- login_password=passme
- login_tenant_name=admin
- name=cirros
- container_format=bare
- disk_format=qcow2
- state=present
- copy_from=http:launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
+- name: Upload an image from an HTTP URL
+ glance_image:
+ login_username: admin
+ login_password: passme
+ login_tenant_name: admin
+ name: cirros
+ container_format: bare
+ disk_format: qcow2
+ state: present
+ copy_from: http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
'''
import time
diff --git a/cloud/openstack/_keystone_user.py b/cloud/openstack/_keystone_user.py
index 72eba989862..250c8936bed 100644
--- a/cloud/openstack/_keystone_user.py
+++ b/cloud/openstack/_keystone_user.py
@@ -17,6 +17,10 @@
# Based on Jimmy Tang's implementation
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: keystone_user
@@ -51,7 +55,7 @@
description:
- The keystone url for authentication
required: false
- default: 'http://127.0.0.1:35357/v2.0/'
+ default: http://127.0.0.1:35357/v2.0/
user:
description:
- The name of the user that has to added/removed from OpenStack
@@ -94,14 +98,22 @@
'''
EXAMPLES = '''
-# Create a tenant
-- keystone_user: tenant=demo tenant_description="Default Tenant"
-
-# Create a user
-- keystone_user: user=john tenant=demo password=secrete
-
-# Apply the admin role to the john user in the demo tenant
-- keystone_user: role=admin user=john tenant=demo
+- name: Create a tenant
+ keystone_user:
+ tenant: demo
+ tenant_description: "Default Tenant"
+
+- name: Create a user
+ keystone_user:
+ user: john
+ tenant: demo
+ password: secrete
+
+- name: Apply the admin role to the john user in the demo tenant
+ keystone_user:
+ role: admin
+ user: john
+ tenant: demo
'''
try:
@@ -229,7 +241,7 @@ def ensure_user_exists(keystone, user_name, password, email, tenant_name,
check_mode):
""" Check if user exists
- Return (True, id) if a new user was created, (False, id) user alrady
+ Return (True, id) if a new user was created, (False, id) user already
exists
"""
diff --git a/cloud/openstack/_nova_compute.py b/cloud/openstack/_nova_compute.py
index 29d3ab04591..0bea21048f5 100644
--- a/cloud/openstack/_nova_compute.py
+++ b/cloud/openstack/_nova_compute.py
@@ -30,6 +30,10 @@
except ImportError:
HAS_NOVACLIENT = False
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: nova_compute
@@ -58,7 +62,7 @@
description:
- The keystone url for authentication
required: false
- default: 'http://127.0.0.1:35357/v2.0/'
+ default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
diff --git a/cloud/openstack/_nova_keypair.py b/cloud/openstack/_nova_keypair.py
index 330a280fdeb..914db91bf2f 100644
--- a/cloud/openstack/_nova_keypair.py
+++ b/cloud/openstack/_nova_keypair.py
@@ -25,11 +25,15 @@
except ImportError:
HAS_NOVACLIENT = False
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: nova_keypair
version_added: "1.2"
-author:
+author:
- "Benno Joy (@bennojoy)"
- "Michael DeHaan"
deprecated: Deprecated in 2.0. Use os_keypair instead
@@ -56,7 +60,7 @@
description:
- The keystone url for authentication
required: false
- default: 'http://127.0.0.1:35357/v2.0/'
+ default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
@@ -83,14 +87,22 @@
- "python-novaclient"
'''
EXAMPLES = '''
-# Creates a key pair with the running users public key
-- nova_keypair: state=present login_username=admin
- login_password=admin login_tenant_name=admin name=ansible_key
- public_key={{ lookup('file','~/.ssh/id_rsa.pub') }}
+- name: Create a key pair with the running users public key
+ nova_keypair:
+ state: present
+ login_username: admin
+ login_password: admin
+ login_tenant_name: admin
+ name: ansible_key
+ public_key: "{{ lookup('file','~/.ssh/id_rsa.pub') }}"
-# Creates a new key pair and the private key returned after the run.
-- nova_keypair: state=present login_username=admin login_password=admin
- login_tenant_name=admin name=ansible_key
+- name: Create a new key pair and the private key returned after the run.
+ nova_keypair:
+ state: present
+ login_username: admin
+ login_password: admin
+ login_tenant_name: admin
+ name: ansible_key
'''
def main():
diff --git a/cloud/openstack/_quantum_floating_ip.py b/cloud/openstack/_quantum_floating_ip.py
index e6eb267e54b..9c72c431d0b 100644
--- a/cloud/openstack/_quantum_floating_ip.py
+++ b/cloud/openstack/_quantum_floating_ip.py
@@ -29,6 +29,10 @@
except ImportError:
HAVE_DEPS = False
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: quantum_floating_ip
@@ -60,7 +64,7 @@
description:
- The keystone url for authentication
required: false
- default: 'http://127.0.0.1:35357/v2.0/'
+ default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
@@ -95,10 +99,15 @@
'''
EXAMPLES = '''
-# Assign a floating ip to the instance from an external network
-- quantum_floating_ip: state=present login_username=admin login_password=admin
- login_tenant_name=admin network_name=external_network
- instance_name=vm1 internal_network_name=internal_network
+- name: Assign a floating ip to the instance from an external network
+ quantum_floating_ip:
+ state: present
+ login_username: admin
+ login_password: admin
+ login_tenant_name: admin
+ network_name: external_network
+ instance_name: vm1
+ internal_network_name: internal_network
'''
def _get_ksclient(module, kwargs):
diff --git a/cloud/openstack/_quantum_floating_ip_associate.py b/cloud/openstack/_quantum_floating_ip_associate.py
index 9085297447e..f7eed5fe861 100644
--- a/cloud/openstack/_quantum_floating_ip_associate.py
+++ b/cloud/openstack/_quantum_floating_ip_associate.py
@@ -28,6 +28,10 @@
except ImportError:
HAVE_DEPS = False
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: quantum_floating_ip_associate
@@ -57,7 +61,7 @@
description:
- the keystone url for authentication
required: false
- default: 'http://127.0.0.1:35357/v2.0/'
+ default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- name of the region
@@ -86,14 +90,14 @@
'''
EXAMPLES = '''
-# Associate a specific floating IP with an Instance
-- quantum_floating_ip_associate:
- state=present
- login_username=admin
- login_password=admin
- login_tenant_name=admin
- ip_address=1.1.1.1
- instance_name=vm1
+- name: Associate a specific floating IP with an Instance
+ quantum_floating_ip_associate:
+ state: present
+ login_username: admin
+ login_password: admin
+ login_tenant_name: admin
+ ip_address: 1.1.1.1
+ instance_name: vm1
'''
def _get_ksclient(module, kwargs):
diff --git a/cloud/openstack/_quantum_network.py b/cloud/openstack/_quantum_network.py
index 09de5e4702d..db82e90d335 100644
--- a/cloud/openstack/_quantum_network.py
+++ b/cloud/openstack/_quantum_network.py
@@ -26,6 +26,10 @@
except ImportError:
HAVE_DEPS = False
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: quantum_network
@@ -59,7 +63,7 @@
description:
- The keystone url for authentication
required: false
- default: 'http://127.0.0.1:35357/v2.0/'
+ default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
@@ -72,7 +76,7 @@
default: present
name:
description:
- - Name to be assigned to the nework
+ - Name to be assigned to the network
required: true
default: None
provider_network_type:
@@ -113,15 +117,26 @@
'''
EXAMPLES = '''
-# Create a GRE backed Quantum network with tunnel id 1 for tenant1
-- quantum_network: name=t1network tenant_name=tenant1 state=present
- provider_network_type=gre provider_segmentation_id=1
- login_username=admin login_password=admin login_tenant_name=admin
-
-# Create an external network
-- quantum_network: name=external_network state=present
- provider_network_type=local router_external=yes
- login_username=admin login_password=admin login_tenant_name=admin
+- name: Create a GRE backed Quantum network with tunnel id 1 for tenant1
+ quantum_network:
+ name: t1network
+ tenant_name: tenant1
+ state: present
+ provider_network_type: gre
+ provider_segmentation_id: 1
+ login_username: admin
+ login_password: admin
+ login_tenant_name: admin
+
+- name: Create an external network
+ quantum_network:
+ name: external_network
+ state: present
+ provider_network_type: local
+ router_external: yes
+ login_username: admin
+ login_password: admin
+ login_tenant_name: admin
'''
_os_keystone = None
diff --git a/cloud/openstack/_quantum_router.py b/cloud/openstack/_quantum_router.py
index f1e0ed8c5b7..c65f916d6b1 100644
--- a/cloud/openstack/_quantum_router.py
+++ b/cloud/openstack/_quantum_router.py
@@ -26,6 +26,10 @@
except ImportError:
HAVE_DEPS = False
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: quantum_router
@@ -55,7 +59,7 @@
description:
- The keystone url for authentication
required: false
- default: 'http://127.0.0.1:35357/v2.0/'
+ default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
@@ -88,12 +92,13 @@
'''
EXAMPLES = '''
-# Creates a router for tenant admin
-- quantum_router: state=present
- login_username=admin
- login_password=admin
- login_tenant_name=admin
- name=router1"
+- name: Create a router for tenant admin
+ quantum_router:
+ state: present
+ login_username: admin
+ login_password: admin
+ login_tenant_name: admin
+ name: router1
'''
_os_keystone = None
diff --git a/cloud/openstack/_quantum_router_gateway.py b/cloud/openstack/_quantum_router_gateway.py
index 0e060b50b7e..af6179bc62d 100644
--- a/cloud/openstack/_quantum_router_gateway.py
+++ b/cloud/openstack/_quantum_router_gateway.py
@@ -26,6 +26,10 @@
except ImportError:
HAVE_DEPS = False
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: quantum_router_gateway
@@ -55,7 +59,7 @@
description:
- The keystone URL for authentication
required: false
- default: 'http://127.0.0.1:35357/v2.0/'
+ default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
@@ -83,10 +87,14 @@
'''
EXAMPLES = '''
-# Attach an external network with a router to allow flow of external traffic
-- quantum_router_gateway: state=present login_username=admin login_password=admin
- login_tenant_name=admin router_name=external_router
- network_name=external_network
+- name: Attach an external network with a router to allow flow of external traffic
+ quantum_router_gateway:
+ state: present
+ login_username: admin
+ login_password: admin
+ login_tenant_name: admin
+ router_name: external_router
+ network_name: external_network
'''
_os_keystone = None
diff --git a/cloud/openstack/_quantum_router_interface.py b/cloud/openstack/_quantum_router_interface.py
index e97740d9b55..b2a1784d99a 100644
--- a/cloud/openstack/_quantum_router_interface.py
+++ b/cloud/openstack/_quantum_router_interface.py
@@ -26,15 +26,19 @@
except ImportError:
HAVE_DEPS = False
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: quantum_router_interface
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_router instead
-short_description: Attach/Dettach a subnet's interface to a router
+short_description: Attach/Detach a subnet's interface to a router
description:
- - Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet.
+ - Attach/Detach a subnet interface to a router, to provide a gateway for the subnet.
options:
login_username:
description:
@@ -88,13 +92,15 @@
'''
EXAMPLES = '''
-# Attach tenant1's subnet to the external router
-- quantum_router_interface: state=present login_username=admin
- login_password=admin
- login_tenant_name=admin
- tenant_name=tenant1
- router_name=external_route
- subnet_name=t1subnet
+- name: "Attach tenant1's subnet to the external router"
+ quantum_router_interface:
+ state: present
+ login_username: admin
+ login_password: admin
+ login_tenant_name: admin
+ tenant_name: tenant1
+ router_name: external_route
+ subnet_name: t1subnet
'''
diff --git a/cloud/openstack/_quantum_subnet.py b/cloud/openstack/_quantum_subnet.py
index 27c2a3e8ea9..e03f5962295 100644
--- a/cloud/openstack/_quantum_subnet.py
+++ b/cloud/openstack/_quantum_subnet.py
@@ -26,6 +26,10 @@
except ImportError:
HAVE_DEPS = False
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: quantum_subnet
@@ -54,7 +58,7 @@
description:
- The keystone URL for authentication
required: false
- default: 'http://127.0.0.1:35357/v2.0/'
+ default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
@@ -123,10 +127,16 @@
'''
EXAMPLES = '''
-# Create a subnet for a tenant with the specified subnet
-- quantum_subnet: state=present login_username=admin login_password=admin
- login_tenant_name=admin tenant_name=tenant1
- network_name=network1 name=net1subnet cidr=192.168.0.0/24"
+- name: Create a subnet for a tenant with the specified subnet
+ quantum_subnet:
+ state: present
+ login_username: admin
+ login_password: admin
+ login_tenant_name: admin
+ tenant_name: tenant1
+ network_name: network1
+ name: net1subnet
+ cidr: 192.168.0.0/24
'''
_os_keystone = None
diff --git a/cloud/openstack/os_auth.py b/cloud/openstack/os_auth.py
index 4f4d22eac94..bcc95aaaa1f 100644
--- a/cloud/openstack/os_auth.py
+++ b/cloud/openstack/os_auth.py
@@ -22,6 +22,10 @@
except ImportError:
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_auth
@@ -37,10 +41,13 @@
'''
EXAMPLES = '''
-# Authenticate to the cloud and retrieve the service catalog
-- os_auth:
+- name: Authenticate to the cloud and retrieve the service catalog
+ os_auth:
cloud: rax-dfw
-- debug: var=service_catalog
+
+- name: Show service catalog
+ debug:
+ var: service_catalog
'''
def main():
diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py
index 1627bdfe322..cc840085f67 100644
--- a/cloud/openstack/os_client_config.py
+++ b/cloud/openstack/os_client_config.py
@@ -18,6 +18,10 @@
import os_client_config
from os_client_config import exceptions
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_client_config
@@ -39,15 +43,17 @@
'''
EXAMPLES = '''
-# Get list of clouds that do not support security groups
-- os_client_config:
-- debug: var={{ item }}
- with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}"
+- name: Get list of clouds that do not support security groups
+ os_client_config:
+
+- debug:
+ var: "{{ item }}"
+ with_items: "{{ openstack.clouds | rejectattr('secgroup_source', 'none') | list }}"
-# Get the information back just about the mordred cloud
-- os_client_config:
+- name: Get the information back just about the mordred cloud
+ os_client_config:
clouds:
- - mordred
+ - mordred
'''
diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py
index f20812a5e57..16c217c2d86 100644
--- a/cloud/openstack/os_floating_ip.py
+++ b/cloud/openstack/os_floating_ip.py
@@ -23,6 +23,13 @@
except ImportError:
HAS_SHADE = False
+from distutils.version import StrictVersion
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_floating_ip
@@ -59,6 +66,14 @@
- To which fixed IP of server the floating IP address should be
attached to.
required: false
+ nat_destination:
+ description:
+ - The name or id of a neutron private network that the fixed IP to
+ attach floating IP is on
+ required: false
+ default: None
+ aliases: ["fixed_network", "internal_network"]
+ version_added: "2.3"
wait:
description:
- When attaching a floating IP address, specify whether we should
@@ -107,6 +122,17 @@
wait: true
timeout: 180
+# Assign a new floating IP from the network `ext_net` to the instance fixed
+# ip in network `private_net` of `cattle001`.
+- os_floating_ip:
+ cloud: dguerri
+ state: present
+ server: cattle001
+ network: ext_net
+ nat_destination: private_net
+ wait: true
+ timeout: 180
+
# Detach a floating IP address from a server
- os_floating_ip:
cloud: dguerri
@@ -133,6 +159,8 @@ def main():
floating_ip_address=dict(required=False, default=None),
reuse=dict(required=False, type='bool', default=False),
fixed_address=dict(required=False, default=None),
+ nat_destination=dict(required=False, default=None,
+ aliases=['fixed_network', 'internal_network']),
wait=dict(required=False, type='bool', default=False),
timeout=dict(required=False, type='int', default=60),
purge=dict(required=False, type='bool', default=False),
@@ -144,12 +172,18 @@ def main():
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
+ if (module.params['nat_destination'] and
+ StrictVersion(shade.__version__) < StrictVersion('1.8.0')):
+        module.fail_json(msg="To utilize nat_destination, the installed version of "
+ "the shade library MUST be >= 1.8.0")
+
server_name_or_id = module.params['server']
state = module.params['state']
network = module.params['network']
floating_ip_address = module.params['floating_ip_address']
reuse = module.params['reuse']
fixed_address = module.params['fixed_address']
+ nat_destination = module.params['nat_destination']
wait = module.params['wait']
timeout = module.params['timeout']
purge = module.params['purge']
@@ -172,7 +206,8 @@ def main():
network_id = cloud.get_network(name_or_id=network)["id"]
else:
network_id = None
- if all([fixed_address, f_ip.fixed_ip_address == fixed_address,
+ if all([(fixed_address and f_ip.fixed_ip_address == fixed_address) or
+            (nat_destination and f_ip.internal_network == nat_destination),
network, f_ip.network != network_id]):
# Current state definitely conflicts with requirements
module.fail_json(msg="server {server} already has a "
@@ -187,13 +222,13 @@ def main():
# Requirements are met
module.exit_json(changed=False, floating_ip=f_ip)
- # Requirments are vague enough to ignore exisitng f_ip and try
+ # Requirements are vague enough to ignore existing f_ip and try
# to create a new f_ip to the server.
server = cloud.add_ips_to_server(
server=server, ips=floating_ip_address, ip_pool=network,
reuse=reuse, fixed_address=fixed_address, wait=wait,
- timeout=timeout)
+ timeout=timeout, nat_destination=nat_destination)
fip_address = cloud.get_server_public_ip(server)
# Update the floating IP status
f_ip = _get_floating_ip(cloud, fip_address)
diff --git a/cloud/openstack/os_image.py b/cloud/openstack/os_image.py
index 3aa49583f11..7632672555c 100644
--- a/cloud/openstack/os_image.py
+++ b/cloud/openstack/os_image.py
@@ -24,6 +24,10 @@
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_image
diff --git a/cloud/openstack/os_image_facts.py b/cloud/openstack/os_image_facts.py
index 4058d4003e8..a810ad467f3 100644
--- a/cloud/openstack/os_image_facts.py
+++ b/cloud/openstack/os_image_facts.py
@@ -21,6 +21,10 @@
except ImportError:
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
module: os_image_facts
short_description: Retrieve facts about an image within OpenStack.
@@ -42,15 +46,18 @@
'''
EXAMPLES = '''
-# Gather facts about a previously created image named image1
-- os_image_facts:
+- name: Gather facts about a previously created image named image1
+ os_image_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
image: image1
-- debug: var=openstack
+
+- name: Show openstack facts
+ debug:
+ var: openstack
'''
RETURN = '''
diff --git a/cloud/openstack/os_ironic.py b/cloud/openstack/os_ironic.py
index 79751347615..2296082f321 100644
--- a/cloud/openstack/os_ironic.py
+++ b/cloud/openstack/os_ironic.py
@@ -23,6 +23,10 @@
HAS_SHADE = False
import jsonpatch
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_ironic
@@ -188,7 +192,7 @@ def _choose_id_value(module):
def _choose_if_password_only(module, patch):
if len(patch) is 1:
if 'password' in patch[0]['path'] and module.params['skip_update_of_masked_password']:
- # Return false to aabort update as the password appears
+ # Return false to abort update as the password appears
# to be the only element in the patch.
return False
return True
diff --git a/cloud/openstack/os_ironic_node.py b/cloud/openstack/os_ironic_node.py
index 17e3b1578eb..fa41d6fcbc0 100644
--- a/cloud/openstack/os_ironic_node.py
+++ b/cloud/openstack/os_ironic_node.py
@@ -24,6 +24,10 @@
from distutils.version import StrictVersion
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_ironic_node
diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py
index cf2b0a808ae..8651901a2af 100644
--- a/cloud/openstack/os_keypair.py
+++ b/cloud/openstack/os_keypair.py
@@ -25,6 +25,10 @@
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_keypair
diff --git a/cloud/openstack/os_network.py b/cloud/openstack/os_network.py
index 9a0c2516310..39f0afa219b 100644
--- a/cloud/openstack/os_network.py
+++ b/cloud/openstack/os_network.py
@@ -25,6 +25,10 @@
from distutils.version import StrictVersion
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_network
@@ -69,7 +73,7 @@
provider_network_type:
description:
- The type of physical network that maps to this network resource.
- choices: ['flat', 'vlan', 'vxlan', 'gre']
+ choices: ['flat', 'vlan', 'vxlan', 'gre', 'uplink']
required: false
default: None
version_added: "2.1"
@@ -169,7 +173,7 @@ def main():
external=dict(default=False, type='bool'),
provider_physical_network=dict(required=False),
provider_network_type=dict(required=False, default=None,
- choices=['flat', 'vlan', 'vxlan', 'gre']),
+ choices=['flat', 'vlan', 'vxlan', 'gre', 'uplink']),
provider_segmentation_id=dict(required=False),
state=dict(default='present', choices=['absent', 'present']),
project=dict(default=None)
@@ -222,8 +226,12 @@ def main():
if provider and StrictVersion(shade.__version__) < StrictVersion('1.5.0'):
module.fail_json(msg="Shade >= 1.5.0 required to use provider options")
- net = cloud.create_network(name, shared, admin_state_up,
- external, provider, project_id)
+ if project_id is not None:
+ net = cloud.create_network(name, shared, admin_state_up,
+ external, provider, project_id)
+ else:
+ net = cloud.create_network(name, shared, admin_state_up,
+ external, provider)
changed = True
else:
changed = False
diff --git a/cloud/openstack/os_networks_facts.py b/cloud/openstack/os_networks_facts.py
index 9db5eceaa69..f39580321ee 100644
--- a/cloud/openstack/os_networks_facts.py
+++ b/cloud/openstack/os_networks_facts.py
@@ -21,6 +21,10 @@
except ImportError:
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_networks_facts
@@ -46,28 +50,34 @@
'''
EXAMPLES = '''
-# Gather facts about previously created networks
-- os_networks_facts:
+- name: Gather facts about previously created networks
+ os_networks_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
-- debug: var=openstack_networks
-# Gather facts about a previously created network by name
-- os_networks_facts:
+- name: Show openstack networks
+ debug:
+ var: openstack_networks
+
+- name: Gather facts about a previously created network by name
+ os_networks_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
name: network1
-- debug: var=openstack_networks
-# Gather facts about a previously created network with filter (note: name and
- filters parameters are Not mutually exclusive)
-- os_networks_facts:
+- name: Show openstack networks
+ debug:
+ var: openstack_networks
+
+- name: Gather facts about a previously created network with filter
+ # Note: name and filters parameters are Not mutually exclusive
+ os_networks_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
@@ -78,7 +88,10 @@
subnets:
- 057d4bdf-6d4d-4728-bb0f-5ac45a6f7400
- 443d4dc0-91d4-4998-b21c-357d10433483
-- debug: var=openstack_networks
+
+- name: Show openstack networks
+ debug:
+ var: openstack_networks
'''
RETURN = '''
diff --git a/cloud/openstack/os_nova_flavor.py b/cloud/openstack/os_nova_flavor.py
index 102b2bf2aee..0f9f5afa91b 100644
--- a/cloud/openstack/os_nova_flavor.py
+++ b/cloud/openstack/os_nova_flavor.py
@@ -21,6 +21,10 @@
except ImportError:
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_nova_flavor
@@ -88,22 +92,21 @@
'''
EXAMPLES = '''
-# Create 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, and 10GB of
-# local disk, and 10GB of ephemeral.
-- os_nova_flavor:
- cloud=mycloud
- state=present
- name=tiny
- ram=1024
- vcpus=1
- disk=10
- ephemeral=10
-
-# Delete 'tiny' flavor
-- os_nova_flavor:
- cloud=mycloud
- state=absent
- name=tiny
+- name: "Create 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, and 10GB of local disk, and 10GB of ephemeral."
+ os_nova_flavor:
+ cloud: mycloud
+ state: present
+ name: tiny
+ ram: 1024
+ vcpus: 1
+ disk: 10
+ ephemeral: 10
+
+- name: "Delete 'tiny' flavor"
+ os_nova_flavor:
+ cloud: mycloud
+ state: absent
+ name: tiny
'''
RETURN = '''
diff --git a/cloud/openstack/os_object.py b/cloud/openstack/os_object.py
index d5d77e1318c..9e67ab39dff 100644
--- a/cloud/openstack/os_object.py
+++ b/cloud/openstack/os_object.py
@@ -23,6 +23,10 @@
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_object
@@ -60,11 +64,19 @@
'''
EXAMPLES = '''
-# Creates a object named 'fstab' in the 'config' container
-- os_object: cloud=mordred state=present name=fstab container=config filename=/etc/fstab
-
-# Deletes a container called config and all of its contents
-- os_object: cloud=rax-iad state=absent container=config
+- name: "Create a object named 'fstab' in the 'config' container"
+ os_object:
+ cloud: mordred
+ state: present
+ name: fstab
+ container: config
+ filename: /etc/fstab
+
+- name: Delete a container called config and all of its contents
+ os_object:
+ cloud: rax-iad
+ state: absent
+ container: config
'''
diff --git a/cloud/openstack/os_port.py b/cloud/openstack/os_port.py
index e31f4fa0e1d..56625a63f56 100644
--- a/cloud/openstack/os_port.py
+++ b/cloud/openstack/os_port.py
@@ -22,6 +22,10 @@
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_port
diff --git a/cloud/openstack/os_router.py b/cloud/openstack/os_router.py
index cb492feed1f..d4a6b28bc82 100644
--- a/cloud/openstack/os_router.py
+++ b/cloud/openstack/os_router.py
@@ -22,6 +22,10 @@
from distutils.version import StrictVersion
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_router
diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py
index f2a2bb6ac1e..3ed5dfceb7a 100644
--- a/cloud/openstack/os_security_group.py
+++ b/cloud/openstack/os_security_group.py
@@ -23,6 +23,10 @@
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_security_group
@@ -53,17 +57,17 @@
EXAMPLES = '''
# Create a security group
- os_security_group:
- cloud=mordred
- state=present
- name=foo
- description=security group for foo servers
+ cloud: mordred
+ state: present
+ name: foo
+ description: security group for foo servers
# Update the existing 'foo' security group description
- os_security_group:
- cloud=mordred
- state=present
- name=foo
- description=updated description for the foo security group
+ cloud: mordred
+ state: present
+ name: foo
+ description: updated description for the foo security group
'''
diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py
index af4106ac44c..3379d160408 100644
--- a/cloud/openstack/os_security_group_rule.py
+++ b/cloud/openstack/os_security_group_rule.py
@@ -23,6 +23,10 @@
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_security_group_rule
@@ -38,8 +42,8 @@
required: true
protocol:
description:
- - IP protocol
- choices: ['tcp', 'udp', 'icmp', None]
+ - IP protocols TCP UDP ICMP 112 (VRRP)
+ choices: ['tcp', 'udp', 'icmp', '112', None]
default: None
port_range_min:
description:
@@ -121,6 +125,12 @@
security_group: foo
protocol: tcp
remote_ip_prefix: 0.0.0.0/0
+
+# Create a rule for VRRP with numbered protocol 112
+- os_security_group_rule:
+ security_group: loadbalancer_sg
+ protocol: 112
+ remote_group: loadbalancer-node_sg
'''
RETURN = '''
@@ -251,7 +261,7 @@ def main():
# NOTE(Shrews): None is an acceptable protocol value for
# Neutron, but Nova will balk at this.
protocol = dict(default=None,
- choices=[None, 'tcp', 'udp', 'icmp']),
+ choices=[None, 'tcp', 'udp', 'icmp', '112']),
port_range_min = dict(required=False, type='int'),
port_range_max = dict(required=False, type='int'),
remote_ip_prefix = dict(required=False, default=None),
diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py
index dd2155c4e1a..0bb7dbcfbc6 100644
--- a/cloud/openstack/os_server.py
+++ b/cloud/openstack/os_server.py
@@ -27,6 +27,10 @@
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_server
@@ -185,15 +189,26 @@
required: false
default: false
version_added: "2.2"
+ reuse_ips:
+ description:
+ - When I(auto_ip) is true and this option is true, the I(auto_ip) code
+ will attempt to re-use unassigned floating ips in the project before
+ creating a new one. It is important to note that it is impossible
+ to safely do this concurrently, so if your use case involves
+ concurrent server creation, it is highly recommended to set this to
+ false and to delete the floating ip associated with a server when
+ the server is deleted using I(delete_fip).
+ required: false
+ default: true
+ version_added: "2.2"
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
-# Creates a new instance and attaches to a network and passes metadata to
-# the instance
-- os_server:
+- name: Create a new instance and attaches to a network and passes metadata to the instance
+ os_server:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
@@ -212,96 +227,95 @@
hostname: test1
group: uge_master
-# Creates a new instance in HP Cloud AE1 region availability zone az2 and
+# Create a new instance in HP Cloud AE1 region availability zone az2 and
# automatically assigns a floating IP
- name: launch a compute instance
hosts: localhost
tasks:
- - name: launch an instance
- os_server:
- state: present
- auth:
- auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
- username: username
- password: Equality7-2521
- project_name: username-project1
- name: vm1
- region_name: region-b.geo-1
- availability_zone: az2
- image: 9302692b-b787-4b52-a3a6-daebb79cb498
- key_name: test
- timeout: 200
- flavor: 101
- security_groups: default
- auto_ip: yes
-
-# Creates a new instance in named cloud mordred availability zone az2
+ - name: launch an instance
+ os_server:
+ state: present
+ auth:
+ auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
+ username: username
+ password: Equality7-2521
+ project_name: username-project1
+ name: vm1
+ region_name: region-b.geo-1
+ availability_zone: az2
+ image: 9302692b-b787-4b52-a3a6-daebb79cb498
+ key_name: test
+ timeout: 200
+ flavor: 101
+ security_groups: default
+ auto_ip: yes
+
+# Create a new instance in named cloud mordred availability zone az2
# and assigns a pre-known floating IP
- name: launch a compute instance
hosts: localhost
tasks:
- - name: launch an instance
- os_server:
- state: present
- cloud: mordred
- name: vm1
- availability_zone: az2
- image: 9302692b-b787-4b52-a3a6-daebb79cb498
- key_name: test
- timeout: 200
- flavor: 101
- floating_ips:
- - 12.34.56.79
-
-# Creates a new instance with 4G of RAM on Ubuntu Trusty, ignoring
+ - name: launch an instance
+ os_server:
+ state: present
+ cloud: mordred
+ name: vm1
+ availability_zone: az2
+ image: 9302692b-b787-4b52-a3a6-daebb79cb498
+ key_name: test
+ timeout: 200
+ flavor: 101
+ floating_ips:
+ - 12.34.56.79
+
+# Create a new instance with 4G of RAM on Ubuntu Trusty, ignoring
# deprecated images
- name: launch a compute instance
hosts: localhost
tasks:
- - name: launch an instance
- os_server:
- name: vm1
- state: present
- cloud: mordred
- region_name: region-b.geo-1
- image: Ubuntu Server 14.04
- image_exclude: deprecated
- flavor_ram: 4096
-
-# Creates a new instance with 4G of RAM on Ubuntu Trusty on a Performance node
+ - name: launch an instance
+ os_server:
+ name: vm1
+ state: present
+ cloud: mordred
+ region_name: region-b.geo-1
+ image: Ubuntu Server 14.04
+ image_exclude: deprecated
+ flavor_ram: 4096
+
+# Create a new instance with 4G of RAM on Ubuntu Trusty on a Performance node
- name: launch a compute instance
hosts: localhost
tasks:
- - name: launch an instance
- os_server:
- name: vm1
- cloud: rax-dfw
- state: present
- image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
- flavor_ram: 4096
- flavor_include: Performance
+ - name: launch an instance
+ os_server:
+ name: vm1
+ cloud: rax-dfw
+ state: present
+ image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
+ flavor_ram: 4096
+ flavor_include: Performance
# Creates a new instance and attaches to multiple network
- name: launch a compute instance
hosts: localhost
tasks:
- - name: launch an instance with a string
- os_server:
- auth:
- auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
- username: admin
- password: admin
- project_name: admin
- name: vm1
- image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
- key_name: ansible_key
- timeout: 200
- flavor: 4
- nics: "net-id=4cb08b20-62fe-11e5-9d70-feff819cdc9f,net-id=542f0430-62fe-11e5-9d70-feff819cdc9f..."
-
-# Creates a new instance and attaches to a network and passes metadata to
-# the instance
-- os_server:
+ - name: launch an instance with a string
+ os_server:
+ auth:
+ auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
+ username: admin
+ password: admin
+ project_name: admin
+ name: vm1
+ image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
+ key_name: ansible_key
+ timeout: 200
+ flavor: 4
+ nics: "net-id=4cb08b20-62fe-11e5-9d70-feff819cdc9f,net-id=542f0430-62fe-11e5-9d70-feff819cdc9f..."
+
+- name: Creates a new instance and attaches to a network and passes metadata to the instance
+ os_server:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
@@ -318,51 +332,51 @@
- net-name: another_network
meta: "hostname=test1,group=uge_master"
-# Creates a new instance and attaches to a specific network
-- os_server:
- state: present
- auth:
- auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
- username: admin
- password: admin
- project_name: admin
- name: vm1
- image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
- key_name: ansible_key
- timeout: 200
- flavor: 4
- network: another_network
-
-# Creates a new instance with 4G of RAM on a 75G Ubuntu Trusty volume
+- name: Creates a new instance and attaches to a specific network
+ os_server:
+ state: present
+ auth:
+ auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
+ username: admin
+ password: admin
+ project_name: admin
+ name: vm1
+ image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
+ key_name: ansible_key
+ timeout: 200
+ flavor: 4
+ network: another_network
+
+# Create a new instance with 4G of RAM on a 75G Ubuntu Trusty volume
- name: launch a compute instance
hosts: localhost
tasks:
- - name: launch an instance
- os_server:
- name: vm1
- state: present
- cloud: mordred
- region_name: ams01
- image: Ubuntu Server 14.04
- flavor_ram: 4096
- boot_from_volume: True
- volume_size: 75
+ - name: launch an instance
+ os_server:
+ name: vm1
+ state: present
+ cloud: mordred
+ region_name: ams01
+ image: Ubuntu Server 14.04
+ flavor_ram: 4096
+ boot_from_volume: True
+ volume_size: 75
# Creates a new instance with 2 volumes attached
- name: launch a compute instance
hosts: localhost
tasks:
- - name: launch an instance
- os_server:
- name: vm1
- state: present
- cloud: mordred
- region_name: ams01
- image: Ubuntu Server 14.04
- flavor_ram: 4096
- volumes:
- - photos
- - music
+ - name: launch an instance
+ os_server:
+ name: vm1
+ state: present
+ cloud: mordred
+ region_name: ams01
+ image: Ubuntu Server 14.04
+ flavor_ram: 4096
+ volumes:
+ - photos
+ - music
'''
@@ -374,7 +388,7 @@ def _exit_hostvars(module, cloud, server, changed=True):
def _parse_nics(nics):
for net in nics:
- if type(net) == str:
+ if isinstance(net, str):
for nic in net.split(','):
yield dict((nic.split('='),))
else:
@@ -384,11 +398,11 @@ def _network_args(module, cloud):
args = []
nics = module.params['nics']
- if type(nics) != list:
+ if not isinstance(nics, list):
module.fail_json(msg='The \'nics\' parameter must be a list.')
for net in _parse_nics(nics):
- if type(net) != dict:
+ if not isinstance(net, dict):
module.fail_json(
msg='Each entry in the \'nics\' parameter must be a dict.')
@@ -445,7 +459,7 @@ def _create_server(module, cloud):
nics = _network_args(module, cloud)
- if type(module.params['meta']) is str:
+ if isinstance(module.params['meta'], str):
metas = {}
for kv_str in module.params['meta'].split(","):
k, v = kv_str.split("=")
@@ -475,6 +489,7 @@ def _create_server(module, cloud):
boot_volume=module.params['boot_volume'],
boot_from_volume=module.params['boot_from_volume'],
terminate_volume=module.params['terminate_volume'],
+ reuse_ips=module.params['reuse_ips'],
wait=module.params['wait'], timeout=module.params['timeout'],
**bootkwargs
)
@@ -575,6 +590,7 @@ def main():
scheduler_hints = dict(default=None, type='dict'),
state = dict(default='present', choices=['absent', 'present']),
delete_fip = dict(default=False, type='bool'),
+ reuse_ips = dict(default=True, type='bool'),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
diff --git a/cloud/openstack/os_server_actions.py b/cloud/openstack/os_server_actions.py
index 44ff6afc8a8..ae37e358a84 100644
--- a/cloud/openstack/os_server_actions.py
+++ b/cloud/openstack/os_server_actions.py
@@ -25,6 +25,10 @@
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_server_actions
@@ -35,6 +39,7 @@
description:
- Perform server actions on an existing compute instance from OpenStack.
This module does not return any data other than changed true/false.
+       When I(action) is 'rebuild', then the I(image) parameter is required.
options:
server:
description:
@@ -55,8 +60,14 @@
description:
- Perform the given action. The lock and unlock actions always return
changed as the servers API does not provide lock status.
- choices: [stop, start, pause, unpause, lock, unlock, suspend, resume]
+ choices: [stop, start, pause, unpause, lock, unlock, suspend, resume,
+ rebuild]
default: present
+ image:
+ description:
+ - Image the server should be rebuilt with
+ default: null
+ version_added: "2.3"
requirements:
- "python >= 2.6"
- "shade"
@@ -82,7 +93,8 @@
'lock': 'ACTIVE', # API doesn't show lock/unlock status
'unlock': 'ACTIVE',
'suspend': 'SUSPENDED',
- 'resume': 'ACTIVE',}
+ 'resume': 'ACTIVE',
+ 'rebuild': 'ACTIVE'}
_admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock']
@@ -113,11 +125,15 @@ def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
action=dict(required=True, choices=['stop', 'start', 'pause', 'unpause',
- 'lock', 'unlock', 'suspend', 'resume']),
+ 'lock', 'unlock', 'suspend', 'resume',
+ 'rebuild']),
+ image=dict(required=False),
)
module_kwargs = openstack_module_kwargs()
- module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs)
+ module = AnsibleModule(argument_spec, supports_check_mode=True,
+ required_if=[('action', 'rebuild', ['image'])],
+ **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
@@ -125,6 +141,7 @@ def main():
action = module.params['action']
wait = module.params['wait']
timeout = module.params['timeout']
+ image = module.params['image']
try:
if action in _admin_actions:
@@ -203,6 +220,18 @@ def main():
_wait(timeout, cloud, server, action)
module.exit_json(changed=True)
+ elif action == 'rebuild':
+ image = cloud.get_image(image)
+
+ if image is None:
+ module.fail_json(msg="Image does not exist")
+
+ # rebuild doesn't set a state, just do it
+ cloud.nova_client.servers.rebuild(server=server.id, image=image.id)
+ if wait:
+ _wait(timeout, cloud, server, action)
+ module.exit_json(changed=True)
+
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
diff --git a/cloud/openstack/os_server_facts.py b/cloud/openstack/os_server_facts.py
index bcda1d4f78f..efeb7780691 100644
--- a/cloud/openstack/os_server_facts.py
+++ b/cloud/openstack/os_server_facts.py
@@ -24,6 +24,10 @@
except ImportError:
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_server_facts
diff --git a/cloud/openstack/os_server_volume.py b/cloud/openstack/os_server_volume.py
index e71e0954a20..a6549649d87 100644
--- a/cloud/openstack/os_server_volume.py
+++ b/cloud/openstack/os_server_volume.py
@@ -25,6 +25,10 @@
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_server_volume
diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py
index d4efe8727f2..54b12adf327 100644
--- a/cloud/openstack/os_subnet.py
+++ b/cloud/openstack/os_subnet.py
@@ -23,6 +23,10 @@
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_subnet
@@ -42,7 +46,7 @@
network_name:
description:
- Name of the network to which the subnet should be attached
- - requried when I(state) is 'present'
+ - Required when I(state) is 'present'
required: false
name:
description:
diff --git a/cloud/openstack/os_subnets_facts.py b/cloud/openstack/os_subnets_facts.py
index 8d853de76de..b4beedab5a1 100644
--- a/cloud/openstack/os_subnets_facts.py
+++ b/cloud/openstack/os_subnets_facts.py
@@ -21,6 +21,10 @@
except ImportError:
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_subnets_facts
@@ -46,28 +50,34 @@
'''
EXAMPLES = '''
-# Gather facts about previously created subnets
-- os_subnets_facts:
+- name: Gather facts about previously created subnets
+ os_subnets_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
-- debug: var=openstack_subnets
-# Gather facts about a previously created subnet by name
-- os_subnets_facts:
+- name: Show openstack subnets
+ debug:
+ var: openstack_subnets
+
+- name: Gather facts about a previously created subnet by name
+ os_subnets_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
- name: subnet1
-- debug: var=openstack_subnets
+ name: subnet1
-# Gather facts about a previously created subnet with filter (note: name and
- filters parameters are Not mutually exclusive)
-- os_subnets_facts:
+- name: Show openstack subnets
+ debug:
+ var: openstack_subnets
+
+- name: Gather facts about a previously created subnet with filter
+ # Note: name and filters parameters are not mutually exclusive
+ os_subnets_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
@@ -75,7 +85,10 @@
project_name: someproject
filters:
tenant_id: 55e2ce24b2a245b09f181bf025724cbe
-- debug: var=openstack_subnets
+
+- name: Show openstack subnets
+ debug:
+ var: openstack_subnets
'''
RETURN = '''
diff --git a/cloud/openstack/os_user.py b/cloud/openstack/os_user.py
index 1e148e3d946..2ef4fe75e94 100644
--- a/cloud/openstack/os_user.py
+++ b/cloud/openstack/os_user.py
@@ -21,6 +21,10 @@
except ImportError:
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_user
@@ -41,9 +45,16 @@
password:
description:
- Password for the user
- - Required when I(state) is present
required: false
default: None
+ update_password:
+ required: false
+ default: always
+ choices: ['always', 'on_create']
+ version_added: "2.3"
+ description:
+      - C(always) will attempt to update the password. C(on_create) will only
+ set the password for newly created users.
email:
description:
- Email address for the user
@@ -90,6 +101,17 @@
cloud: mycloud
state: absent
name: demouser
+
+# Create a user but don't update password if user exists
+- os_user:
+ cloud: mycloud
+ state: present
+ name: demouser
+ password: secret
+ update_password: on_create
+ email: demo@example.com
+ domain: default
+ default_project: demo
'''
@@ -121,19 +143,41 @@
sample: "demouser"
'''
-def _needs_update(module, user):
- keys = ('email', 'default_project', 'domain', 'enabled')
- for key in keys:
- if module.params[key] is not None and module.params[key] != user.get(key):
+def _needs_update(params_dict, user):
+ for k, v in params_dict.items():
+ if k not in ('password', 'update_password') and user[k] != v:
return True
# We don't get password back in the user object, so assume any supplied
# password is a change.
- if module.params['password'] is not None:
+ if (params_dict['password'] is not None and
+ params_dict['update_password'] == 'always'):
return True
return False
+def _get_domain_id(cloud, domain):
+ try:
+ # We assume admin is passing domain id
+ domain_id = cloud.get_domain(domain)['id']
+ except:
+ # If we fail, maybe admin is passing a domain name.
+ # Note that domains have unique names, just like id.
+ try:
+ domain_id = cloud.search_domains(filters={'name': domain})[0]['id']
+ except:
+ # Ok, let's hope the user is non-admin and passing a sane id
+ domain_id = domain
+
+ return domain_id
+
+def _get_default_project_id(cloud, default_project):
+ project = cloud.get_project(default_project)
+ if not project:
+ module.fail_json(msg='Default project %s is not valid' % default_project)
+
+ return project['id']
+
def main():
argument_spec = openstack_full_argument_spec(
@@ -144,14 +188,13 @@ def main():
domain=dict(required=False, default=None),
enabled=dict(default=True, type='bool'),
state=dict(default='present', choices=['absent', 'present']),
+ update_password=dict(default='always', choices=['always',
+ 'on_create']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(
argument_spec,
- required_if=[
- ('state', 'present', ['password'])
- ],
**module_kwargs)
if not HAS_SHADE:
@@ -164,47 +207,53 @@ def main():
domain = module.params['domain']
enabled = module.params['enabled']
state = module.params['state']
+ update_password = module.params['update_password']
try:
cloud = shade.openstack_cloud(**module.params)
user = cloud.get_user(name)
+ domain_id = None
if domain:
opcloud = shade.operator_cloud(**module.params)
- try:
- # We assume admin is passing domain id
- dom = opcloud.get_domain(domain)['id']
- domain = dom
- except:
- # If we fail, maybe admin is passing a domain name.
- # Note that domains have unique names, just like id.
- try:
- dom = opcloud.search_domains(filters={'name': domain})[0]['id']
- domain = dom
- except:
- # Ok, let's hope the user is non-admin and passing a sane id
- pass
+ domain_id = _get_domain_id(opcloud, domain)
if state == 'present':
- project_id = None
+ if update_password in ('always', 'on_create'):
+ if not password:
+ msg = ("update_password is %s but a password value is "
+ "missing") % update_password
+                    module.fail_json(msg=msg)
+ default_project_id = None
if default_project:
- project = cloud.get_project(default_project)
- if not project:
- module.fail_json(msg='Default project %s is not valid' % default_project)
- project_id = project['id']
+ default_project_id = _get_default_project_id(cloud, default_project)
if user is None:
user = cloud.create_user(
name=name, password=password, email=email,
- default_project=default_project, domain_id=domain,
+ default_project=default_project_id, domain_id=domain_id,
enabled=enabled)
changed = True
else:
- if _needs_update(module, user):
- user = cloud.update_user(
- user['id'], password=password, email=email,
- default_project=project_id, domain_id=domain,
- enabled=enabled)
+ params_dict = {'email': email, 'enabled': enabled,
+ 'password': password,
+ 'update_password': update_password}
+ if domain_id is not None:
+ params_dict['domain_id'] = domain_id
+ if default_project_id is not None:
+ params_dict['default_project_id'] = default_project_id
+
+ if _needs_update(params_dict, user):
+ if update_password == 'always':
+ user = cloud.update_user(
+ user['id'], password=password, email=email,
+ default_project=default_project_id,
+ domain_id=domain_id, enabled=enabled)
+ else:
+ user = cloud.update_user(
+ user['id'], email=email,
+ default_project=default_project_id,
+ domain_id=domain_id, enabled=enabled)
changed = True
else:
changed = False
diff --git a/cloud/openstack/os_user_group.py b/cloud/openstack/os_user_group.py
index 9d21a7f033f..2f39f346792 100644
--- a/cloud/openstack/os_user_group.py
+++ b/cloud/openstack/os_user_group.py
@@ -21,6 +21,10 @@
except ImportError:
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_user_group
diff --git a/cloud/openstack/os_volume.py b/cloud/openstack/os_volume.py
index 9e7436e1a5f..6d6cc08d749 100644
--- a/cloud/openstack/os_volume.py
+++ b/cloud/openstack/os_volume.py
@@ -23,6 +23,10 @@
HAS_SHADE = False
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: os_volume
diff --git a/cloud/rackspace/rax.py b/cloud/rackspace/rax.py
index 64105d32ed0..e9a1fd48768 100644
--- a/cloud/rackspace/rax.py
+++ b/cloud/rackspace/rax.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax
@@ -182,7 +186,7 @@
description:
- how long before wait gives up, in seconds
default: 300
-author:
+author:
- "Jesse Keating (@j2sol)"
- "Matt Martz (@sivel)"
notes:
@@ -287,7 +291,7 @@ def create(module, names=[], flavor=None, image=None, meta={}, key_name=None,
if user_data and os.path.isfile(os.path.expanduser(user_data)):
try:
- user_data = os.path.expanduser('user_data')
+ user_data = os.path.expanduser(user_data)
f = open(user_data)
user_data = f.read()
f.close()
@@ -890,4 +894,6 @@ def main():
from ansible.module_utils.rax import *
# invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_cbs.py b/cloud/rackspace/rax_cbs.py
index c54ee580886..a09ce53def1 100644
--- a/cloud/rackspace/rax_cbs.py
+++ b/cloud/rackspace/rax_cbs.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_cbs
@@ -233,4 +237,6 @@ def main():
from ansible.module_utils.rax import *
# invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_cbs_attachments.py b/cloud/rackspace/rax_cbs_attachments.py
index 3b014d465fc..0c8032b35eb 100644
--- a/cloud/rackspace/rax_cbs_attachments.py
+++ b/cloud/rackspace/rax_cbs_attachments.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_cbs_attachments
@@ -218,4 +222,6 @@ def main():
from ansible.module_utils.rax import *
### invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_cdb.py b/cloud/rackspace/rax_cdb.py
index ec659222e88..4706457ae61 100644
--- a/cloud/rackspace/rax_cdb.py
+++ b/cloud/rackspace/rax_cdb.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_cdb
@@ -258,4 +262,6 @@ def main():
from ansible.module_utils.rax import *
# invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_cdb_database.py b/cloud/rackspace/rax_cdb_database.py
index 45d0f24722d..d2f061d4a93 100644
--- a/cloud/rackspace/rax_cdb_database.py
+++ b/cloud/rackspace/rax_cdb_database.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
module: rax_cdb_database
short_description: 'create / delete a database in the Cloud Databases'
@@ -172,4 +176,6 @@ def main():
from ansible.module_utils.rax import *
# invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_cdb_user.py b/cloud/rackspace/rax_cdb_user.py
index ff54365ea16..7fa1bc50485 100644
--- a/cloud/rackspace/rax_cdb_user.py
+++ b/cloud/rackspace/rax_cdb_user.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_cdb_user
@@ -217,4 +221,6 @@ def main():
from ansible.module_utils.rax import *
# invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_clb.py b/cloud/rackspace/rax_clb.py
index f02d6488db0..9d4d75c2291 100644
--- a/cloud/rackspace/rax_clb.py
+++ b/cloud/rackspace/rax_clb.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_clb
@@ -305,4 +309,6 @@ def main():
from ansible.module_utils.rax import *
### invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_clb_nodes.py b/cloud/rackspace/rax_clb_nodes.py
index 31bba7bf525..844834d7476 100644
--- a/cloud/rackspace/rax_clb_nodes.py
+++ b/cloud/rackspace/rax_clb_nodes.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_clb_nodes
@@ -278,4 +282,6 @@ def main():
from ansible.module_utils.rax import *
# invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_dns.py b/cloud/rackspace/rax_dns.py
index 6024fea1271..eb62eaac356 100644
--- a/cloud/rackspace/rax_dns.py
+++ b/cloud/rackspace/rax_dns.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_dns
@@ -170,4 +174,6 @@ def main():
from ansible.module_utils.rax import *
### invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_dns_record.py b/cloud/rackspace/rax_dns_record.py
index c5227174ac3..1499b09eb68 100644
--- a/cloud/rackspace/rax_dns_record.py
+++ b/cloud/rackspace/rax_dns_record.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_dns_record
@@ -345,4 +349,6 @@ def main():
from ansible.module_utils.rax import *
### invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_facts.py b/cloud/rackspace/rax_facts.py
index 8923718e5e7..8c49e8df988 100644
--- a/cloud/rackspace/rax_facts.py
+++ b/cloud/rackspace/rax_facts.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_facts
@@ -143,4 +147,6 @@ def main():
from ansible.module_utils.rax import *
### invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_files.py b/cloud/rackspace/rax_files.py
index 48d5db21284..aac6b8d5bf1 100644
--- a/cloud/rackspace/rax_files.py
+++ b/cloud/rackspace/rax_files.py
@@ -19,6 +19,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_files
@@ -86,10 +90,12 @@
gather_facts: no
tasks:
- name: "List all containers"
- rax_files: state=list
+ rax_files:
+ state: list
- name: "Create container called 'mycontainer'"
- rax_files: container=mycontainer
+ rax_files:
+ container: mycontainer
- name: "Create container 'mycontainer2' with metadata"
rax_files:
@@ -99,19 +105,30 @@
file_for: someuser@example.com
- name: "Set a container's web index page"
- rax_files: container=mycontainer web_index=index.html
+ rax_files:
+ container: mycontainer
+ web_index: index.html
- name: "Set a container's web error page"
- rax_files: container=mycontainer web_error=error.html
+ rax_files:
+ container: mycontainer
+ web_error: error.html
- name: "Make container public"
- rax_files: container=mycontainer public=yes
+ rax_files:
+ container: mycontainer
+ public: yes
- name: "Make container public with a 24 hour TTL"
- rax_files: container=mycontainer public=yes ttl=86400
+ rax_files:
+ container: mycontainer
+ public: yes
+ ttl: 86400
- name: "Make container private"
- rax_files: container=mycontainer private=yes
+ rax_files:
+ container: mycontainer
+ private: yes
- name: "Test Cloud Files Containers Metadata Storage"
hosts: local
@@ -376,4 +393,6 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_files_objects.py b/cloud/rackspace/rax_files_objects.py
index d89a8067093..a1124913aef 100644
--- a/cloud/rackspace/rax_files_objects.py
+++ b/cloud/rackspace/rax_files_objects.py
@@ -19,6 +19,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_files_objects
@@ -102,28 +106,50 @@
gather_facts: False
tasks:
- name: "Get objects from test container"
- rax_files_objects: container=testcont dest=~/Downloads/testcont
+ rax_files_objects:
+ container: testcont
+ dest: ~/Downloads/testcont
- name: "Get single object from test container"
- rax_files_objects: container=testcont src=file1 dest=~/Downloads/testcont
+ rax_files_objects:
+ container: testcont
+ src: file1
+ dest: ~/Downloads/testcont
- name: "Get several objects from test container"
- rax_files_objects: container=testcont src=file1,file2,file3 dest=~/Downloads/testcont
+ rax_files_objects:
+ container: testcont
+ src: file1,file2,file3
+ dest: ~/Downloads/testcont
- name: "Delete one object in test container"
- rax_files_objects: container=testcont method=delete dest=file1
+ rax_files_objects:
+ container: testcont
+ method: delete
+ dest: file1
- name: "Delete several objects in test container"
- rax_files_objects: container=testcont method=delete dest=file2,file3,file4
+ rax_files_objects:
+ container: testcont
+ method: delete
+ dest: file2,file3,file4
- name: "Delete all objects in test container"
- rax_files_objects: container=testcont method=delete
+ rax_files_objects:
+ container: testcont
+ method: delete
- name: "Upload all files to test container"
- rax_files_objects: container=testcont method=put src=~/Downloads/onehundred
+ rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/onehundred
- name: "Upload one file to test container"
- rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file1
+ rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/testcont/file1
- name: "Upload one file to test container with metadata"
rax_files_objects:
@@ -135,14 +161,25 @@
who_uploaded_this: someuser@example.com
- name: "Upload one file to test container with TTL of 60 seconds"
- rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file3 expires=60
+ rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/testcont/file3
+ expires: 60
- name: "Attempt to get remote object that does not exist"
- rax_files_objects: container=testcont method=get src=FileThatDoesNotExist.jpg dest=~/Downloads/testcont
+ rax_files_objects:
+ container: testcont
+ method: get
+ src: FileThatDoesNotExist.jpg
+ dest: ~/Downloads/testcont
ignore_errors: yes
- name: "Attempt to delete remote object that does not exist"
- rax_files_objects: container=testcont method=delete dest=FileThatDoesNotExist.jpg
+ rax_files_objects:
+ container: testcont
+ method: delete
+ dest: FileThatDoesNotExist.jpg
ignore_errors: yes
- name: "Test Cloud Files Objects Metadata"
@@ -150,10 +187,16 @@
gather_facts: false
tasks:
- name: "Get metadata on one object"
- rax_files_objects: container=testcont type=meta dest=file2
+ rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file2
- name: "Get metadata on several objects"
- rax_files_objects: container=testcont type=meta src=file2,file1
+ rax_files_objects:
+ container: testcont
+ type: meta
+ src: file2,file1
- name: "Set metadata on an object"
rax_files_objects:
@@ -167,7 +210,10 @@
clear_meta: true
- name: "Verify metadata is set"
- rax_files_objects: container=testcont type=meta src=file17
+ rax_files_objects:
+ container: testcont
+ type: meta
+ src: file17
- name: "Delete metadata"
rax_files_objects:
@@ -180,7 +226,9 @@
key2: ''
- name: "Get metadata on all objects"
- rax_files_objects: container=testcont type=meta
+ rax_files_objects:
+ container: testcont
+ type: meta
'''
try:
@@ -573,4 +621,6 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_identity.py b/cloud/rackspace/rax_identity.py
index b4d1cc3f28a..baa856447a3 100644
--- a/cloud/rackspace/rax_identity.py
+++ b/cloud/rackspace/rax_identity.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_identity
@@ -106,4 +110,6 @@ def main():
from ansible.module_utils.rax import *
# invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_keypair.py b/cloud/rackspace/rax_keypair.py
index 0e9585b24c9..5fab5ca79a5 100644
--- a/cloud/rackspace/rax_keypair.py
+++ b/cloud/rackspace/rax_keypair.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_keypair
@@ -171,4 +175,6 @@ def main():
from ansible.module_utils.rax import *
### invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_meta.py b/cloud/rackspace/rax_meta.py
index 724513a3120..18bce5a4f4e 100644
--- a/cloud/rackspace/rax_meta.py
+++ b/cloud/rackspace/rax_meta.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_meta
@@ -175,4 +179,6 @@ def main():
from ansible.module_utils.rax import *
### invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_network.py b/cloud/rackspace/rax_network.py
index 59b1cd6554a..7ff6edd0eca 100644
--- a/cloud/rackspace/rax_network.py
+++ b/cloud/rackspace/rax_network.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_network
@@ -143,4 +147,6 @@ def main():
from ansible.module_utils.rax import *
### invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_queue.py b/cloud/rackspace/rax_queue.py
index fc2c1ae8d5c..a1112460db8 100644
--- a/cloud/rackspace/rax_queue.py
+++ b/cloud/rackspace/rax_queue.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_queue
@@ -144,4 +148,6 @@ def main():
from ansible.module_utils.rax import *
### invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_scaling_group.py b/cloud/rackspace/rax_scaling_group.py
index 952e215c72a..95aef91cc57 100644
--- a/cloud/rackspace/rax_scaling_group.py
+++ b/cloud/rackspace/rax_scaling_group.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_scaling_group
@@ -426,4 +430,6 @@ def main():
from ansible.module_utils.rax import *
# invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_scaling_policy.py b/cloud/rackspace/rax_scaling_policy.py
index 33ebc60779d..c56cee50dd2 100644
--- a/cloud/rackspace/rax_scaling_policy.py
+++ b/cloud/rackspace/rax_scaling_policy.py
@@ -16,6 +16,10 @@
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rax_scaling_policy
@@ -280,4 +284,6 @@ def main():
from ansible.module_utils.rax import *
# invoke the module
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py
index 3e8426e90f7..5425db6f89e 100644
--- a/cloud/vmware/vsphere_guest.py
+++ b/cloud/vmware/vsphere_guest.py
@@ -19,6 +19,7 @@
# TODO:
# Ability to set CPU/Memory reservations
+
try:
import json
except ImportError:
@@ -36,6 +37,10 @@
import ssl
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: vsphere_guest
@@ -262,7 +267,7 @@
vm_extra_config:
folder: MyFolder
-# Task to gather facts from a vSphere cluster only if the system is a VMWare guest
+# Task to gather facts from a vSphere cluster only if the system is a VMware guest
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
@@ -736,7 +741,7 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
try:
if not vmTarget:
- cloneArgs = dict(resourcepool=rpmor, power_on=power_on_after_clone)
+ cloneArgs = dict(resourcepool=rpmor, power_on=False)
if snapshot_to_clone is not None:
#check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone.
@@ -748,6 +753,18 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
cloneArgs["folder"] = vm_extra_config.get("folder")
vmTemplate.clone(guest, **cloneArgs)
+
+ vm = vsphere_client.get_vm_by_name(guest)
+
+ # VM was created. If there is any extra config options specified, set
+ if vm_extra_config:
+ vm.set_extra_config(vm_extra_config)
+
+ # Power on if asked
+ if power_on_after_clone == True:
+ state = 'powered_on'
+ power_state(vm, state, True)
+
changed = True
else:
changed = False
@@ -974,7 +991,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
disk_num = 0
dev_changes = []
disks_changed = {}
- for disk in sorted(vm_disk.iterkeys()):
+ for disk in sorted(vm_disk):
try:
disksize = int(vm_disk[disk]['size_gb'])
# Convert the disk size to kilobytes
@@ -1330,7 +1347,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
if vm_disk:
disk_num = 0
disk_key = 0
- for disk in sorted(vm_disk.iterkeys()):
+ for disk in sorted(vm_disk):
try:
datastore = vm_disk[disk]['datastore']
except KeyError:
@@ -1386,7 +1403,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
add_floppy(module, vsphere_client, config_target, config, devices,
default_devs, floppy_type, floppy_image_path)
if vm_nic:
- for nic in sorted(vm_nic.iterkeys()):
+ for nic in sorted(vm_nic):
try:
nictype = vm_nic[nic]['type']
except KeyError:
@@ -1547,6 +1564,7 @@ def gather_facts(vm):
'hw_guest_full_name': vm.properties.config.guestFullName,
'hw_guest_id': vm.properties.config.guestId,
'hw_product_uuid': vm.properties.config.uuid,
+ 'hw_instance_uuid': vm.properties.config.instanceUuid,
'hw_processor_count': vm.properties.config.hardware.numCPU,
'hw_memtotal_mb': vm.properties.config.hardware.memoryMB,
'hw_interfaces':[],
diff --git a/commands/command.py b/commands/command.py
index 9892e7c541a..9b8afe3ef58 100644
--- a/commands/command.py
+++ b/commands/command.py
@@ -19,6 +19,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: command
@@ -28,7 +32,7 @@
- The M(command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($HOME) and operations
- like C("<"), C(">"), C("|"), and C("&") will not work (use the M(shell)
+ like C("<"), C(">"), C("|"), C(";") and C("&") will not work (use the M(shell)
module if you need these features).
options:
free_form:
@@ -95,13 +99,13 @@
import datetime
import glob
-import re
import shlex
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import b
+
def check_command(commandline):
arguments = { 'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
'ln': 'state=link', 'mkdir': 'state=directory',
@@ -142,9 +146,9 @@ def main():
shell = module.params['_uses_shell']
chdir = module.params['chdir']
executable = module.params['executable']
- args = module.params['_raw_params']
- creates = module.params['creates']
- removes = module.params['removes']
+ args = module.params['_raw_params']
+ creates = module.params['creates']
+ removes = module.params['removes']
warn = module.params['warn']
if args.strip() == '':
@@ -163,20 +167,18 @@ def main():
cmd=args,
stdout="skipped, since %s exists" % creates,
changed=False,
- stderr=False,
rc=0
)
if removes:
- # do not run the command if the line contains removes=filename
- # and the filename does not exist. This allows idempotence
- # of command executions.
+ # do not run the command if the line contains removes=filename
+ # and the filename does not exist. This allows idempotence
+ # of command executions.
if not glob.glob(removes):
module.exit_json(
cmd=args,
stdout="skipped, since %s does not exist" % removes,
changed=False,
- stderr=False,
rc=0
)
@@ -188,7 +190,7 @@ def main():
args = shlex.split(args)
startd = datetime.datetime.now()
- rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell)
+ rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None)
endd = datetime.datetime.now()
delta = endd - startd
diff --git a/commands/raw.py b/commands/raw.py
index 36c3b38f430..3d6f315624e 100644
--- a/commands/raw.py
+++ b/commands/raw.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: raw
diff --git a/commands/script.py b/commands/script.py
index 1d07bbad80e..47a6571455f 100644
--- a/commands/script.py
+++ b/commands/script.py
@@ -13,6 +13,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: script
@@ -47,7 +51,7 @@
notes:
- It is usually preferable to write Ansible modules than pushing scripts. Convert your script to an Ansible module for bonus points!
- The ssh connection plugin will force psuedo-tty allocation via -tt when scripts are executed. psuedo-ttys do not have a stderr channel and all stderr is sent to stdout. If you depend on separated stdout and stderr result keys, please switch to a copy+command set of tasks instead of using script.
-author:
+author:
- Ansible Core Team
- Michael DeHaan
"""
@@ -57,8 +61,12 @@
- script: /some/local/script.sh --some-arguments 1234
# Run a script that creates a file, but only if the file is not yet created
-- script: /some/local/create_file.sh --some-arguments 1234 creates=/the/created/file.txt
+- script: /some/local/create_file.sh --some-arguments 1234
+ args:
+ creates: /the/created/file.txt
# Run a script that removes a file, but only if the file is not yet removed
-- script: /some/local/remove_file.sh --some-arguments 1234 removes=/the/removed/file.txt
+- script: /some/local/remove_file.sh --some-arguments 1234
+ args:
+ removes: /the/removed/file.txt
'''
diff --git a/commands/shell.py b/commands/shell.py
index 96bbae5e296..93d187b81ec 100644
--- a/commands/shell.py
+++ b/commands/shell.py
@@ -17,6 +17,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: shell
@@ -68,11 +72,11 @@
playbooks will follow the trend of using M(command) unless M(shell) is
explicitly required. When running ad-hoc commands, use your best
judgement.
- - To sanitize any variables passed to the shell module, you should use
+ - To sanitize any variables passed to the shell module, you should use
"{{ var | quote }}" instead of just "{{ var }}" to make sure they don't include evil things like semicolons.
requirements: [ ]
-author:
+author:
- Ansible Core Team
- Michael DeHaan
'''
@@ -83,7 +87,9 @@
- shell: somescript.sh >> somelog.txt
# Change the working directory to somedir/ before executing the command.
-- shell: somescript.sh >> somelog.txt chdir=somedir/
+- shell: somescript.sh >> somelog.txt
+ args:
+ chdir: somedir/
# You can also use the 'args' form to provide the options. This command
# will change the working directory to somedir/ and will only run when
@@ -146,4 +152,4 @@
returned: always
type: list of strings
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
-'''
\ No newline at end of file
+'''
diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py
index 9d765e2bc74..b3ae64f206c 100644
--- a/database/mysql/mysql_db.py
+++ b/database/mysql/mysql_db.py
@@ -19,6 +19,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: mysql_db
@@ -78,18 +82,33 @@
'''
EXAMPLES = '''
-# Create a new database with name 'bobdata'
-- mysql_db: name=bobdata state=present
+- name: Create a new database with name 'bobdata'
+ mysql_db:
+ name: bobdata
+ state: present
# Copy database dump file to remote host and restore it to database 'my_db'
-- copy: src=dump.sql.bz2 dest=/tmp
-- mysql_db: name=my_db state=import target=/tmp/dump.sql.bz2
-
-# Dumps all databases to hostname.sql
-- mysql_db: state=dump name=all target=/tmp/{{ inventory_hostname }}.sql
-
-# Imports file.sql similiar to mysql -u -p < hostname.sql
-- mysql_db: state=import name=all target=/tmp/{{ inventory_hostname }}.sql
+- name: Copy database dump file
+ copy:
+ src: dump.sql.bz2
+ dest: /tmp
+- name: Restore database
+ mysql_db:
+ name: my_db
+ state: import
+ target: /tmp/dump.sql.bz2
+
+- name: Dump all databases to hostname.sql
+ mysql_db:
+ state: dump
+ name: all
+ target: /tmp/{{ inventory_hostname }}.sql
+
+- name: Import file.sql similar to mysql -u -p < hostname.sql
+ mysql_db:
+ state: import
+ name: all
+ target: /tmp/{{ inventory_hostname }}.sql
'''
import os
@@ -317,7 +336,7 @@ def main():
else:
rc, stdout, stderr = db_dump(module, login_host, login_user,
login_password, db, target, all_databases,
- login_port, config_file, socket, ssl_cert, ssl_key, ssl_ca)
+ login_port, config_file, socket, ssl_cert, ssl_key, ssl_ca, single_transaction, quick)
if rc != 0:
module.fail_json(msg="%s" % stderr)
else:
diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py
index 961697b9dd9..286106fe711 100644
--- a/database/mysql/mysql_user.py
+++ b/database/mysql/mysql_user.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: mysql_user
@@ -103,7 +107,7 @@
without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing
the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from
the file."
- - Currently, there is only support for the `mysql_native_password` encryted password hash module.
+ - Currently, there is only support for the `mysql_native_password` encrypted password hash module.
author: "Jonathan Mainguy (@Jmainguy)"
extends_documentation_fragment: mysql
@@ -111,43 +115,89 @@
EXAMPLES = """
# Removes anonymous user account for localhost
-- mysql_user: name='' host=localhost state=absent
+- mysql_user:
+ name: ''
+ host: localhost
+ state: absent
# Removes all anonymous user accounts
-- mysql_user: name='' host_all=yes state=absent
+- mysql_user:
+ name: ''
+ host_all: yes
+ state: absent
# Create database user with name 'bob' and password '12345' with all database privileges
-- mysql_user: name=bob password=12345 priv=*.*:ALL state=present
+- mysql_user:
+ name: bob
+ password: 12345
+ priv: '*.*:ALL'
+ state: present
# Create database user with name 'bob' and previously hashed mysql native password '*EE0D72C1085C46C5278932678FBE2C6A782821B4' with all database privileges
-- mysql_user: name=bob password='*EE0D72C1085C46C5278932678FBE2C6A782821B4' encrypted=yes priv=*.*:ALL state=present
+- mysql_user:
+ name: bob
+ password: '*EE0D72C1085C46C5278932678FBE2C6A782821B4'
+ encrypted: yes
+ priv: '*.*:ALL'
+ state: present
# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION'
-- mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present
+- mysql_user:
+ name: bob
+ password: 12345
+ priv: '*.*:ALL,GRANT'
+ state: present
# Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
-- mysql_user: name=bob append_privs=true priv=*.*:REQUIRESSL state=present
+- mysql_user:
+ name: bob
+ append_privs: true
+ priv: '*.*:REQUIRESSL'
+ state: present
# Ensure no user named 'sally'@'localhost' exists, also passing in the auth credentials.
-- mysql_user: login_user=root login_password=123456 name=sally state=absent
+- mysql_user:
+ login_user: root
+ login_password: 123456
+ name: sally
+ state: absent
# Ensure no user named 'sally' exists at all
-- mysql_user: name=sally host_all=yes state=absent
+- mysql_user:
+ name: sally
+ host_all: yes
+ state: absent
# Specify grants composed of more than one word
-- mysql_user: name=replication password=12345 priv="*.*:REPLICATION CLIENT" state=present
+- mysql_user:
+ name: replication
+ password: 12345
+ priv: "*.*:REPLICATION CLIENT"
+ state: present
# Revoke all privileges for user 'bob' and password '12345'
-- mysql_user: name=bob password=12345 priv=*.*:USAGE state=present
+- mysql_user:
+ name: bob
+ password: 12345
+ priv: "*.*:USAGE"
+ state: present
# Example privileges string format
mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL
# Example using login_unix_socket to connect to server
-- mysql_user: name=root password=abc123 login_unix_socket=/var/run/mysqld/mysqld.sock
+- mysql_user:
+ name: root
+ password: abc123
+ login_unix_socket: /var/run/mysqld/mysqld.sock
# Example of skipping binary logging while adding user 'bob'
-- mysql_user: name=bob password=12345 priv=*.*:USAGE state=present sql_log_bin=no
+- mysql_user:
+ name: bob
+ password: 12345
+ priv: "*.*:USAGE"
+ state: present
+ sql_log_bin: no
# Example .my.cnf file for setting the root password
@@ -166,6 +216,7 @@
mysqldb_found = False
else:
mysqldb_found = True
+from ansible.module_utils.six import iteritems
VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION',
'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER',
@@ -213,7 +264,7 @@ def get_mode(cursor):
def user_exists(cursor, user, host, host_all):
if host_all:
- cursor.execute("SELECT count(*) FROM user WHERE user = %s", user)
+ cursor.execute("SELECT count(*) FROM user WHERE user = %s", ([user]))
else:
cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host))
@@ -234,9 +285,8 @@ def user_add(cursor, user, host, host_all, password, encrypted, new_priv, check_
cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password))
else:
cursor.execute("CREATE USER %s@%s", (user,host))
-
if new_priv is not None:
- for db_table, priv in new_priv.iteritems():
+ for db_table, priv in iteritems(new_priv):
privileges_grant(cursor, user,host,db_table,priv)
return True
@@ -252,7 +302,7 @@ def user_mod(cursor, user, host, host_all, password, encrypted, new_priv, append
grant_option = False
if host_all:
- hostnames = user_get_hostnames(cursor, user)
+ hostnames = user_get_hostnames(cursor, [user])
else:
hostnames = [host]
@@ -293,7 +343,7 @@ def user_mod(cursor, user, host, host_all, password, encrypted, new_priv, append
if old_user_mgmt:
cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user, host, password))
else:
- cursor.execute("ALTER USER %s@%s IDENTIFIED BY %s", (user, host, password))
+ cursor.execute("ALTER USER %s@%s IDENTIFIED WITH mysql_native_password BY %s", (user, host, password))
changed = True
# Handle privileges
@@ -302,7 +352,7 @@ def user_mod(cursor, user, host, host_all, password, encrypted, new_priv, append
# If the user has privileges on a db.table that doesn't appear at all in
# the new specification, then revoke all privileges on it.
- for db_table, priv in curr_priv.iteritems():
+ for db_table, priv in iteritems(curr_priv):
# If the user has the GRANT OPTION on a db.table, revoke it first.
if "GRANT" in priv:
grant_option = True
@@ -315,7 +365,7 @@ def user_mod(cursor, user, host, host_all, password, encrypted, new_priv, append
# If the user doesn't currently have any privileges on a db.table, then
# we can perform a straight grant operation.
- for db_table, priv in new_priv.iteritems():
+ for db_table, priv in iteritems(new_priv):
if db_table not in curr_priv:
if module.check_mode:
return True
@@ -342,7 +392,7 @@ def user_delete(cursor, user, host, host_all, check_mode):
return True
if host_all:
- hostnames = user_get_hostnames(cursor, user)
+ hostnames = user_get_hostnames(cursor, [user])
for hostname in hostnames:
cursor.execute("DROP USER %s@%s", (user, hostname))
@@ -415,9 +465,12 @@ def privileges_unpack(priv, mode):
for item in priv.strip().split('/'):
pieces = item.strip().split(':')
dbpriv = pieces[0].rsplit(".", 1)
- # Do not escape if privilege is for database '*' (all databases)
- if dbpriv[0].strip('`') != '*':
- pieces[0] = '%s%s%s.%s' % (quote, dbpriv[0].strip('`'), quote, dbpriv[1])
+ # Do not escape if privilege is for database or table, i.e.
+ # neither quote *. nor .*
+ for i, side in enumerate(dbpriv):
+ if side.strip('`') != '*':
+ dbpriv[i] = '%s%s%s' % (quote, side.strip('`'), quote)
+ pieces[0] = '.'.join(dbpriv)
if '(' in pieces[1]:
output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper())
diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py
index 5cb6bf6fe5b..506ff705d56 100644
--- a/database/mysql/mysql_variables.py
+++ b/database/mysql/mysql_variables.py
@@ -22,6 +22,10 @@
along with Ansible. If not, see .
"""
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: mysql_variables
@@ -44,10 +48,13 @@
'''
EXAMPLES = '''
# Check for sync_binlog setting
-- mysql_variables: variable=sync_binlog
+- mysql_variables:
+ variable: sync_binlog
# Set read_only variable to 1
-- mysql_variables: variable=read_only value=1
+- mysql_variables:
+ variable: read_only
+ value: 1
'''
diff --git a/database/postgresql/postgresql_db.py b/database/postgresql/postgresql_db.py
old mode 100644
new mode 100755
index bfb2f7e0951..ffd22060096
--- a/database/postgresql/postgresql_db.py
+++ b/database/postgresql/postgresql_db.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: postgresql_db
@@ -95,16 +99,18 @@
EXAMPLES = '''
# Create a new database with name "acme"
-- postgresql_db: name=acme
+- postgresql_db:
+ name: acme
# Create a new database with name "acme" and specific encoding and locale
# settings. If a template different from "template0" is specified, encoding
# and locale settings must match those of the template.
-- postgresql_db: name=acme
- encoding='UTF-8'
- lc_collate='de_DE.UTF-8'
- lc_ctype='de_DE.UTF-8'
- template='template0'
+- postgresql_db:
+ name: acme
+ encoding: UTF-8
+ lc_collate: de_DE.UTF-8
+ lc_ctype: de_DE.UTF-8
+ template: template0
'''
try:
@@ -114,6 +120,7 @@
postgresqldb_found = False
else:
postgresqldb_found = True
+from ansible.module_utils.six import iteritems
class NotSupportedError(Exception):
pass
@@ -261,7 +268,7 @@ def main():
"login_password":"password",
"port":"port"
}
- kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
+ kw = dict( (params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != '' )
# If a login_unix_socket is specified, incorporate it here.
@@ -287,11 +294,11 @@ def main():
try:
if module.check_mode:
if state == "absent":
- changed = not db_exists(cursor, db)
+ changed = db_exists(cursor, db)
elif state == "present":
changed = not db_matches(cursor, db, owner, template, encoding,
lc_collate, lc_ctype)
- module.exit_json(changed=changed,db=db)
+ module.exit_json(changed=changed, db=db)
if state == "absent":
try:
diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py
index 6944b54fcc6..ae606464dc9 100644
--- a/database/postgresql/postgresql_privs.py
+++ b/database/postgresql/postgresql_privs.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: postgresql_privs
@@ -143,90 +147,90 @@
# On database "library":
# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
# TO librarian, reader WITH GRANT OPTION
-- postgresql_privs: >
- database=library
- state=present
- privs=SELECT,INSERT,UPDATE
- type=table
- objs=books,authors
- schema=public
- roles=librarian,reader
- grant_option=yes
+- postgresql_privs:
+ database: library
+ state: present
+ privs: SELECT,INSERT,UPDATE
+ type: table
+ objs: books,authors
+ schema: public
+ roles: librarian,reader
+ grant_option: yes
# Same as above leveraging default values:
-- postgresql_privs: >
- db=library
- privs=SELECT,INSERT,UPDATE
- objs=books,authors
- roles=librarian,reader
- grant_option=yes
+- postgresql_privs:
+ db: library
+ privs: SELECT,INSERT,UPDATE
+ objs: books,authors
+ roles: librarian,reader
+ grant_option: yes
# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
# Note that role "reader" will be *granted* INSERT privilege itself if this
-# isn't already the case (since state=present).
-- postgresql_privs: >
- db=library
- state=present
- priv=INSERT
- obj=books
- role=reader
- grant_option=no
+# isn't already the case (since state: present).
+- postgresql_privs:
+ db: library
+ state: present
+ priv: INSERT
+ obj: books
+ role: reader
+ grant_option: no
# REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader
# "public" is the default schema. This also works for PostgreSQL 8.x.
-- postgresql_privs: >
- db=library
- state=absent
- privs=INSERT,UPDATE
- objs=ALL_IN_SCHEMA
- role=reader
+- postgresql_privs:
+ db: library
+ state: absent
+ privs: INSERT,UPDATE
+ objs: ALL_IN_SCHEMA
+ role: reader
# GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian
-- postgresql_privs: >
- db=library
- privs=ALL
- type=schema
- objs=public,math
- role=librarian
+- postgresql_privs:
+ db: library
+ privs: ALL
+ type: schema
+ objs: public,math
+ role: librarian
# GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader
# Note the separation of arguments with colons.
-- postgresql_privs: >
- db=library
- privs=ALL
- type=function
- obj=add(int:int)
- schema=math
- roles=librarian,reader
+- postgresql_privs:
+ db: library
+ privs: ALL
+ type: function
+ obj: add(int:int)
+ schema: math
+ roles: librarian,reader
# GRANT librarian, reader TO alice, bob WITH ADMIN OPTION
# Note that group role memberships apply cluster-wide and therefore are not
# restricted to database "library" here.
-- postgresql_privs: >
- db=library
- type=group
- objs=librarian,reader
- roles=alice,bob
- admin_option=yes
+- postgresql_privs:
+ db: library
+ type: group
+ objs: librarian,reader
+ roles: alice,bob
+ admin_option: yes
# GRANT ALL PRIVILEGES ON DATABASE library TO librarian
-# Note that here "db=postgres" specifies the database to connect to, not the
+# Note that here "db: postgres" specifies the database to connect to, not the
# database to grant privileges on (which is specified via the "objs" param)
-- postgresql_privs: >
- db=postgres
- privs=ALL
- type=database
- obj=library
- role=librarian
+- postgresql_privs:
+ db: postgres
+ privs: ALL
+ type: database
+ obj: library
+ role: librarian
# GRANT ALL PRIVILEGES ON DATABASE library TO librarian
# If objs is omitted for type "database", it defaults to the database
# to which the connection is established
-- postgresql_privs: >
- db=library
- privs=ALL
- type=database
- role=librarian
+- postgresql_privs:
+ db: library
+ privs: ALL
+ type: database
+ role: librarian
"""
try:
@@ -537,7 +541,7 @@ def main():
port=dict(type='int', default=5432),
unix_socket=dict(default='', aliases=['login_unix_socket']),
login=dict(default='postgres', aliases=['login_user']),
- password=dict(default='', aliases=['login_password'])
+ password=dict(default='', aliases=['login_password'], no_log=True)
),
supports_check_mode = True
)
diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py
index caa1dc2dbb2..95c19caaba9 100644
--- a/database/postgresql/postgresql_user.py
+++ b/database/postgresql/postgresql_user.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: postgresql_user
@@ -142,22 +146,41 @@
EXAMPLES = '''
# Create django user and grant access to database and products table
-- postgresql_user: db=acme name=django password=ceec4eif7ya priv=CONNECT/products:ALL
+- postgresql_user:
+ db: acme
+ name: django
+ password: ceec4eif7ya
+ priv: "CONNECT/products:ALL"
# Create rails user, grant privilege to create other databases and demote rails from super user status
-- postgresql_user: name=rails password=secret role_attr_flags=CREATEDB,NOSUPERUSER
+- postgresql_user:
+ name: rails
+ password: secret
+ role_attr_flags: CREATEDB,NOSUPERUSER
# Remove test user privileges from acme
-- postgresql_user: db=acme name=test priv=ALL/products:ALL state=absent fail_on_user=no
+- postgresql_user:
+ db: acme
+ name: test
+ priv: "ALL/products:ALL"
+ state: absent
+ fail_on_user: no
# Remove test user from test database and the cluster
-- postgresql_user: db=test name=test priv=ALL state=absent
+- postgresql_user:
+ db: test
+ name: test
+ priv: ALL
+ state: absent
# Example privileges string format
INSERT,UPDATE/table:SELECT/anothertable:ALL
# Remove an existing user's password
-- postgresql_user: db=test user=test password=NULL
+- postgresql_user:
+ db: test
+ user: test
+ password: NULL
'''
import re
@@ -170,6 +193,7 @@
postgresqldb_found = False
else:
postgresqldb_found = True
+from ansible.module_utils.six import iteritems
_flags = ('SUPERUSER', 'CREATEROLE', 'CREATEUSER', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' % f for f in _flags)))
@@ -433,7 +457,7 @@ def revoke_privileges(cursor, user, privs):
changed = False
for type_ in privs:
- for name, privileges in privs[type_].iteritems():
+ for name, privileges in iteritems(privs[type_]):
# Check that any of the privileges requested to be removed are
# currently granted to the user
differences = check_funcs[type_](cursor, user, name, privileges)
@@ -451,7 +475,7 @@ def grant_privileges(cursor, user, privs):
changed = False
for type_ in privs:
- for name, privileges in privs[type_].iteritems():
+ for name, privileges in iteritems(privs[type_]):
# Check that any of the privileges requested for the user are
# currently missing
differences = check_funcs[type_](cursor, user, name, privileges)
@@ -543,11 +567,11 @@ def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
- login_password=dict(default=""),
+ login_password=dict(default="", no_log=True),
login_host=dict(default=""),
login_unix_socket=dict(default=""),
user=dict(required=True, aliases=['name']),
- password=dict(default=None),
+ password=dict(default=None, no_log=True),
state=dict(default="present", choices=["absent", "present"]),
priv=dict(default=None),
db=dict(default=''),
@@ -595,7 +619,7 @@ def main():
"port":"port",
"db":"database"
}
- kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
+ kw = dict( (params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != "" )
# If a login_unix_socket is specified, incorporate it here.
@@ -662,4 +686,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/files/acl.py b/files/acl.py
index e74646f7ee3..ebd46d53c9b 100644
--- a/files/acl.py
+++ b/files/acl.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: acl
@@ -78,7 +82,7 @@
required: false
default: null
description:
- - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '::'. The qualifier may be empty for some types, but the type and perms are always requried. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields.
+ - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '::'. The qualifier may be empty for some types, but the type and perms are always required. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields.
recursive:
version_added: "2.0"
@@ -97,19 +101,38 @@
EXAMPLES = '''
# Grant user Joe read access to a file
-- acl: name=/etc/foo.conf entity=joe etype=user permissions="r" state=present
+- acl:
+ name: /etc/foo.conf
+ entity: joe
+ etype: user
+ permissions: r
+ state: present
# Removes the acl for Joe on a specific file
-- acl: name=/etc/foo.conf entity=joe etype=user state=absent
+- acl:
+ name: /etc/foo.conf
+ entity: joe
+ etype: user
+ state: absent
# Sets default acl for joe on foo.d
-- acl: name=/etc/foo.d entity=joe etype=user permissions=rw default=yes state=present
+- acl:
+ name: /etc/foo.d
+ entity: joe
+ etype: user
+ permissions: rw
+ default: yes
+ state: present
# Same as previous but using entry shorthand
-- acl: name=/etc/foo.d entry="default:user:joe:rw-" state=present
+- acl:
+ name: /etc/foo.d
+ entry: "default:user:joe:rw-"
+ state: present
# Obtain the acl for a specific file
-- acl: name=/etc/foo.conf
+- acl:
+ name: /etc/foo.conf
register: acl_info
'''
@@ -202,7 +225,7 @@ def acl_changed(module, cmd):
if get_platform().lower() == 'freebsd':
return True
- cmd = cmd[:] # lists are mutables so cmd would be overriden without this
+ cmd = cmd[:] # lists are mutables so cmd would be overwritten without this
cmd.insert(1, '--test')
lines = run_acl(module, cmd)
@@ -349,4 +372,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/files/assemble.py b/files/assemble.py
index 6eea02e547b..41e7530e446 100644
--- a/files/assemble.py
+++ b/files/assemble.py
@@ -2,6 +2,7 @@
# -*- coding: utf-8 -*-
# (c) 2012, Stephen Fromm
+# (c) 2016, Toshio Kuratomi
#
# This file is part of Ansible
#
@@ -18,10 +19,9 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-import os
-import os.path
-import tempfile
-import re
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -99,51 +99,70 @@
EXAMPLES = '''
# Example from Ansible Playbooks
-- assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf
+- assemble:
+ src: /etc/someapp/fragments
+ dest: /etc/someapp/someapp.conf
# When a delimiter is specified, it will be inserted in between each fragment
-- assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf delimiter='### START FRAGMENT ###'
+- assemble:
+ src: /etc/someapp/fragments
+ dest: /etc/someapp/someapp.conf
+ delimiter: '### START FRAGMENT ###'
# Copy a new "sshd_config" file into place, after passing validation with sshd
-- assemble: src=/etc/ssh/conf.d/ dest=/etc/ssh/sshd_config validate='/usr/sbin/sshd -t -f %s'
+- assemble:
+ src: /etc/ssh/conf.d/
+ dest: /etc/ssh/sshd_config
+ validate: '/usr/sbin/sshd -t -f %s'
'''
+import codecs
+import os
+import os.path
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.six import b
+
+
# ===========================================
# Support method
def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False):
''' assemble a file from a directory of fragments '''
tmpfd, temp_path = tempfile.mkstemp()
- tmp = os.fdopen(tmpfd,'w')
+ tmp = os.fdopen(tmpfd, 'wb')
delimit_me = False
add_newline = False
for f in sorted(os.listdir(src_path)):
if compiled_regexp and not compiled_regexp.search(f):
continue
- fragment = "%s/%s" % (src_path, f)
+ fragment = u"%s/%s" % (src_path, f)
if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
continue
- fragment_content = file(fragment).read()
+ fragment_content = open(fragment, 'rb').read()
# always put a newline between fragments if the previous fragment didn't end with a newline.
if add_newline:
- tmp.write('\n')
+ tmp.write(b('\n'))
# delimiters should only appear between fragments
if delimit_me:
if delimiter:
# un-escape anything like newlines
- delimiter = delimiter.decode('unicode-escape')
+ delimiter = codecs.escape_decode(delimiter)[0]
tmp.write(delimiter)
# always make sure there's a newline after the
# delimiter, so lines don't run together
- if delimiter[-1] != '\n':
- tmp.write('\n')
+ if delimiter[-1] != b('\n'):
+ tmp.write(b('\n'))
tmp.write(fragment_content)
delimit_me = True
- if fragment_content.endswith('\n'):
+ if fragment_content.endswith(b('\n')):
add_newline = False
else:
add_newline = True
@@ -151,6 +170,7 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, igno
tmp.close()
return temp_path
+
def cleanup(path, result=None):
# cleanup just in case
if os.path.exists(path):
@@ -162,8 +182,6 @@ def cleanup(path, result=None):
if result is not None:
result['warnings'] = ['Unable to remove temp file (%s): %s' % (path, str(e))]
-# ==============================================================
-# main
def main():
@@ -201,7 +219,7 @@ def main():
if not os.path.isdir(src):
module.fail_json(msg="Source (%s) is not a directory" % src)
- if regexp != None:
+ if regexp is not None:
try:
compiled_regexp = re.compile(regexp)
except re.error:
@@ -248,8 +266,5 @@ def main():
result['msg'] = "OK"
module.exit_json(**result)
-# import module snippets
-from ansible.module_utils.basic import *
-
-main()
-
+if __name__ == '__main__':
+ main()
diff --git a/files/copy.py b/files/copy.py
index 9ec8c659525..f9bbd1baa4f 100644
--- a/files/copy.py
+++ b/files/copy.py
@@ -18,8 +18,9 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-import os
-import tempfile
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -105,19 +106,43 @@
EXAMPLES = '''
# Example from Ansible Playbooks
-- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode=0644
+- copy:
+ src: /srv/myfiles/foo.conf
+ dest: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: 0644
# The same example as above, but using a symbolic mode equivalent to 0644
-- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode="u=rw,g=r,o=r"
+- copy:
+ src: /srv/myfiles/foo.conf
+ dest: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: "u=rw,g=r,o=r"
# Another symbolic mode example, adding some permissions and removing others
-- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode="u+rw,g-wx,o-rwx"
+- copy:
+ src: /srv/myfiles/foo.conf
+ dest: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: "u+rw,g-wx,o-rwx"
# Copy a new "ntp.conf file into place, backing up the original if it differs from the copied version
-- copy: src=/mine/ntp.conf dest=/etc/ntp.conf owner=root group=root mode=644 backup=yes
+- copy:
+ src: /mine/ntp.conf
+ dest: /etc/ntp.conf
+ owner: root
+ group: root
+ mode: 0644
+ backup: yes
# Copy a new "sudoers" file into place, after passing validation with visudo
-- copy: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s'
+- copy:
+ src: /mine/sudoers
+ dest: /etc/sudoers
+ validate: 'visudo -cf %s'
'''
RETURN = '''
@@ -183,16 +208,28 @@
sample: "file"
'''
+import os
+import shutil
+import tempfile
+import traceback
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils._text import to_bytes, to_native
+
+
def split_pre_existing_dir(dirname):
'''
Return the first pre-existing directory and a list of the new directories that will be created.
'''
head, tail = os.path.split(dirname)
- if not os.path.exists(head):
+ b_head = to_bytes(head, errors='surrogate_or_strict')
+ if not os.path.exists(b_head):
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
else:
- return (head, [ tail ])
+ return (head, [tail])
new_directory_list.append(tail)
return (pre_existing_dir, new_directory_list)
@@ -215,10 +252,10 @@ def main():
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec = dict(
- src = dict(required=False),
- original_basename = dict(required=False), # used to handle 'dest is a directory' via template, a slight hack
+ src = dict(required=False, type='path'),
+ original_basename = dict(required=False), # used to handle 'dest is a directory' via template, a slight hack
content = dict(required=False, no_log=True),
- dest = dict(required=True),
+ dest = dict(required=True, type='path'),
backup = dict(default=False, type='bool'),
force = dict(default=True, aliases=['thirsty'], type='bool'),
validate = dict(required=False, type='str'),
@@ -229,21 +266,23 @@ def main():
supports_check_mode=True,
)
- src = os.path.expanduser(module.params['src'])
- dest = os.path.expanduser(module.params['dest'])
+ src = module.params['src']
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+ dest = module.params['dest']
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
backup = module.params['backup']
- force = module.params['force']
- original_basename = module.params.get('original_basename',None)
- validate = module.params.get('validate',None)
+ force = module.params['force']
+ original_basename = module.params.get('original_basename', None)
+ validate = module.params.get('validate', None)
follow = module.params['follow']
- mode = module.params['mode']
+ mode = module.params['mode']
remote_src = module.params['remote_src']
- if not os.path.exists(src):
+ if not os.path.exists(b_src):
module.fail_json(msg="Source %s not found" % (src))
- if not os.access(src, os.R_OK):
+ if not os.access(b_src, os.R_OK):
module.fail_json(msg="Source %s not readable" % (src))
- if os.path.isdir(src):
+ if os.path.isdir(b_src):
module.fail_json(msg="Remote copy does not support recursive copy of directory: %s" % (src))
checksum_src = module.sha1(src)
@@ -259,10 +298,12 @@ def main():
# Special handling for recursive copy - create intermediate dirs
if original_basename and dest.endswith(os.sep):
dest = os.path.join(dest, original_basename)
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
dirname = os.path.dirname(dest)
- if not os.path.exists(dirname) and os.path.isabs(dirname):
+ b_dirname = to_bytes(dirname, errors='surrogate_or_strict')
+ if not os.path.exists(b_dirname) and os.path.isabs(b_dirname):
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname)
- os.makedirs(dirname)
+ os.makedirs(b_dirname)
directory_args = module.load_file_common_arguments(module.params)
directory_mode = module.params["directory_mode"]
if directory_mode is not None:
@@ -271,45 +312,48 @@ def main():
directory_args['mode'] = None
adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
- if os.path.exists(dest):
- if os.path.islink(dest) and follow:
- dest = os.path.realpath(dest)
+ if os.path.isdir(b_dest):
+ basename = os.path.basename(src)
+ if original_basename:
+ basename = original_basename
+ dest = os.path.join(dest, basename)
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+
+ if os.path.exists(b_dest):
+ if os.path.islink(b_dest) and follow:
+ b_dest = os.path.realpath(b_dest)
+ dest = to_native(b_dest, errors='surrogate_or_strict')
if not force:
module.exit_json(msg="file already exists", src=src, dest=dest, changed=False)
- if (os.path.isdir(dest)):
- basename = os.path.basename(src)
- if original_basename:
- basename = original_basename
- dest = os.path.join(dest, basename)
- if os.access(dest, os.R_OK):
+ if os.access(b_dest, os.R_OK):
checksum_dest = module.sha1(dest)
else:
- if not os.path.exists(os.path.dirname(dest)):
+ if not os.path.exists(os.path.dirname(b_dest)):
try:
# os.path.exists() can return false in some
# circumstances where the directory does not have
# the execute bit for the current user set, in
# which case the stat() call will raise an OSError
- os.stat(os.path.dirname(dest))
+ os.stat(os.path.dirname(b_dest))
except OSError:
e = get_exception()
- if "permission denied" in str(e).lower():
+ if "permission denied" in to_native(e).lower():
module.fail_json(msg="Destination directory %s is not accessible" % (os.path.dirname(dest)))
module.fail_json(msg="Destination directory %s does not exist" % (os.path.dirname(dest)))
- if not os.access(os.path.dirname(dest), os.W_OK):
+ if not os.access(os.path.dirname(b_dest), os.W_OK):
module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest)))
backup_file = None
- if checksum_src != checksum_dest or os.path.islink(dest):
+ if checksum_src != checksum_dest or os.path.islink(b_dest):
if not module.check_mode:
try:
if backup:
- if os.path.exists(dest):
+ if os.path.exists(b_dest):
backup_file = module.backup_local(dest)
# allow for conversion from symlink.
- if os.path.islink(dest):
- os.unlink(dest)
- open(dest, 'w').close()
+ if os.path.islink(b_dest):
+ os.unlink(b_dest)
+ open(b_dest, 'w').close()
if validate:
# if we have a mode, make sure we set it on the temporary
# file source as some validations may require it
@@ -318,14 +362,14 @@ def main():
module.set_mode_if_different(src, mode, False)
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
- (rc,out,err) = module.run_command(validate % src)
+ (rc, out, err) = module.run_command(validate % src)
if rc != 0:
module.fail_json(msg="failed to validate", exit_status=rc, stdout=out, stderr=err)
- mysrc = src
+ b_mysrc = b_src
if remote_src:
- _, mysrc = tempfile.mkstemp(dir=os.path.dirname(dest))
- shutil.copy2(src, mysrc)
- module.atomic_move(mysrc, dest, unsafe_writes=module.params['unsafe_writes'])
+ _, b_mysrc = tempfile.mkstemp(dir=os.path.dirname(b_dest))
+ shutil.copy2(b_src, b_mysrc)
+ module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes'])
except IOError:
module.fail_json(msg="failed to copy: %s to %s" % (src, dest), traceback=traceback.format_exc())
changed = True
@@ -333,7 +377,7 @@ def main():
changed = False
res_args = dict(
- dest = dest, src = src, md5sum = md5sum_src, checksum = checksum_src, changed = changed
+ dest=dest, src=src, md5sum=md5sum_src, checksum=checksum_src, changed=changed
)
if backup_file:
res_args['backup_file'] = backup_file
@@ -345,6 +389,5 @@ def main():
module.exit_json(**res_args)
-# import module snippets
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/files/fetch.py b/files/fetch.py
index ad34964217e..f069a23acfb 100644
--- a/files/fetch.py
+++ b/files/fetch.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: fetch
@@ -65,7 +69,7 @@
will use the basename of the source file, similar to the copy module.
Obviously this is only handy if the filenames are unique.
requirements: []
-author:
+author:
- "Ansible Core Team"
- "Michael DeHaan"
notes:
@@ -79,14 +83,25 @@
EXAMPLES = '''
# Store file into /tmp/fetched/host.example.com/tmp/somefile
-- fetch: src=/tmp/somefile dest=/tmp/fetched
+- fetch:
+ src: /tmp/somefile
+ dest: /tmp/fetched
# Specifying a path directly
-- fetch: src=/tmp/somefile dest=/tmp/prefix-{{ inventory_hostname }} flat=yes
+- fetch:
+ src: /tmp/somefile
+ dest: /tmp/prefix-{{ inventory_hostname }}
+ flat: yes
# Specifying a destination path
-- fetch: src=/tmp/uniquefile dest=/tmp/special/ flat=yes
+- fetch:
+ src: /tmp/uniquefile
+ dest: /tmp/special/
+ flat: yes
# Storing in a path relative to the playbook
-- fetch: src=/tmp/uniquefile dest=special/prefix-{{ inventory_hostname }} flat=yes
+- fetch:
+ src: /tmp/uniquefile
+ dest: special/prefix-{{ inventory_hostname }}
+ flat: yes
'''
diff --git a/files/file.py b/files/file.py
index 11ff6cf85fc..ef98f036e8b 100644
--- a/files/file.py
+++ b/files/file.py
@@ -18,11 +18,9 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-import errno
-import shutil
-import stat
-import grp
-import pwd
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -37,7 +35,7 @@
notes:
- See also M(copy), M(template), M(assemble)
requirements: [ ]
-author:
+author:
- "Ansible Core Team"
- "Michael DeHaan"
options:
@@ -94,34 +92,65 @@
EXAMPLES = '''
# change file ownership, group and mode. When specifying mode using octal numbers, first digit should always be 0.
-- file: path=/etc/foo.conf owner=foo group=foo mode=0644
-- file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link
-- file: src=/tmp/{{ item.src }} dest={{ item.dest }} state=link
+- file:
+ path: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: 0644
+- file:
+ src: /file/to/link/to
+ dest: /path/to/symlink
+ owner: foo
+ group: foo
+ state: link
+- file:
+ src: '/tmp/{{ item.src }}'
+ dest: '{{ item.dest }}'
+ state: link
with_items:
- { src: 'x', dest: 'y' }
- { src: 'z', dest: 'k' }
# touch a file, using symbolic modes to set the permissions (equivalent to 0644)
-- file: path=/etc/foo.conf state=touch mode="u=rw,g=r,o=r"
+- file:
+ path: /etc/foo.conf
+ state: touch
+ mode: "u=rw,g=r,o=r"
# touch the same file, but add/remove some permissions
-- file: path=/etc/foo.conf state=touch mode="u+rw,g-wx,o-rwx"
+- file:
+ path: /etc/foo.conf
+ state: touch
+ mode: "u+rw,g-wx,o-rwx"
# create a directory if it doesn't exist
-- file: path=/etc/some_directory state=directory mode=0755
-
+- file:
+ path: /etc/some_directory
+ state: directory
+ mode: 0755
'''
+import errno
+import os
+import shutil
+import time
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.six import b
+from ansible.module_utils._text import to_bytes, to_native
-def get_state(path):
+
+def get_state(b_path):
''' Find out current state '''
- if os.path.lexists(path):
- if os.path.islink(path):
+ if os.path.lexists(b_path):
+ if os.path.islink(b_path):
return 'link'
- elif os.path.isdir(path):
+ elif os.path.isdir(b_path):
return 'directory'
- elif os.stat(path).st_nlink > 1:
+ elif os.stat(b_path).st_nlink > 1:
return 'hard'
else:
# could be many other things, but defaulting to file
@@ -129,70 +158,73 @@ def get_state(path):
return 'absent'
-def recursive_set_attributes(module, path, follow, file_args):
+
+def recursive_set_attributes(module, b_path, follow, file_args):
changed = False
- for root, dirs, files in os.walk(path):
- for fsobj in dirs + files:
- fsname = os.path.join(root, fsobj)
- if not os.path.islink(fsname):
+ for b_root, b_dirs, b_files in os.walk(b_path):
+ for b_fsobj in b_dirs + b_files:
+ b_fsname = os.path.join(b_root, b_fsobj)
+ if not os.path.islink(b_fsname):
tmp_file_args = file_args.copy()
- tmp_file_args['path']=fsname
+ tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
changed |= module.set_fs_attributes_if_different(tmp_file_args, changed)
else:
tmp_file_args = file_args.copy()
- tmp_file_args['path']=fsname
+ tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
changed |= module.set_fs_attributes_if_different(tmp_file_args, changed)
if follow:
- fsname = os.path.join(root, os.readlink(fsname))
- if os.path.isdir(fsname):
- changed |= recursive_set_attributes(module, fsname, follow, file_args)
+ b_fsname = os.path.join(b_root, os.readlink(b_fsname))
+ if os.path.isdir(b_fsname):
+ changed |= recursive_set_attributes(module, b_fsname, follow, file_args)
tmp_file_args = file_args.copy()
- tmp_file_args['path']=fsname
+ tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
changed |= module.set_fs_attributes_if_different(tmp_file_args, changed)
return changed
+
def main():
module = AnsibleModule(
- argument_spec = dict(
- state = dict(choices=['file','directory','link','hard','touch','absent'], default=None),
- path = dict(aliases=['dest', 'name'], required=True),
- original_basename = dict(required=False), # Internal use only, for recursive ops
- recurse = dict(default=False, type='bool'),
- force = dict(required=False, default=False, type='bool'),
- diff_peek = dict(default=None), # Internal use only, for internal checks in the action plugins
- validate = dict(required=False, default=None), # Internal use only, for template and copy
- src = dict(required=False, default=None),
+ argument_spec=dict(
+ state=dict(choices=['file', 'directory', 'link', 'hard', 'touch', 'absent'], default=None),
+ path=dict(aliases=['dest', 'name'], required=True, type='path'),
+ original_basename=dict(required=False), # Internal use only, for recursive ops
+ recurse=dict(default=False, type='bool'),
+ force=dict(required=False, default=False, type='bool'),
+ diff_peek=dict(default=None), # Internal use only, for internal checks in the action plugins
+ validate=dict(required=False, default=None), # Internal use only, for template and copy
+ src=dict(required=False, default=None, type='path'),
),
add_file_common_args=True,
supports_check_mode=True
)
params = module.params
- state = params['state']
+ state = params['state']
force = params['force']
diff_peek = params['diff_peek']
src = params['src']
+ b_src = to_bytes(src, errors='surrogate_or_strict')
follow = params['follow']
# modify source as we later reload and pass, specially relevant when used by other modules.
- params['path'] = path = os.path.expanduser(params['path'])
+ path = params['path']
+ b_path = to_bytes(path, errors='surrogate_or_strict')
# short-circuit for diff_peek
if diff_peek is not None:
appears_binary = False
try:
- f = open(path)
- b = f.read(8192)
+ f = open(b_path, 'rb')
+ head = f.read(8192)
f.close()
- if "\x00" in b:
+ if b("\x00") in head:
appears_binary = True
except:
pass
module.exit_json(path=path, changed=False, appears_binary=appears_binary)
- prev_state = get_state(path)
-
+ prev_state = get_state(b_path)
# state should default to file, but since that creates many conflicts,
# default to 'current' when it exists.
@@ -204,18 +236,17 @@ def main():
# source is both the source of a symlink or an informational passing of the src for a template module
# or copy module, even if this module never uses it, it is needed to key off some things
- if src is not None:
- src = os.path.expanduser(src)
- else:
- if state in ['link','hard']:
+ if src is None:
+ if state in ('link', 'hard'):
if follow and state == 'link':
# use the current target of the link as the source
- src = os.path.realpath(path)
+ src = to_native(os.path.realpath(b_path), errors='strict')
+ b_src = to_bytes(os.path.realpath(b_path), errors='strict')
else:
module.fail_json(msg='src and dest are required for creating links')
# original_basename is used by other modules that depend on file.
- if os.path.isdir(path) and state not in ["link", "absent"]:
+ if os.path.isdir(b_path) and state not in ("link", "absent"):
basename = None
if params['original_basename']:
basename = params['original_basename']
@@ -223,6 +254,7 @@ def main():
basename = os.path.basename(src)
if basename:
params['path'] = path = os.path.join(path, basename)
+ b_path = to_bytes(path, errors='surrogate_or_strict')
# make sure the target path is a directory when we're doing a recursive operation
recurse = params['recurse']
@@ -232,11 +264,8 @@ def main():
file_args = module.load_file_common_arguments(params)
changed = False
- diff = {'before':
- {'path': path}
- ,
- 'after':
- {'path': path}
+ diff = {'before': {'path': path},
+ 'after': {'path': path},
}
state_change = False
@@ -250,13 +279,13 @@ def main():
if not module.check_mode:
if prev_state == 'directory':
try:
- shutil.rmtree(path, ignore_errors=False)
+ shutil.rmtree(b_path, ignore_errors=False)
except Exception:
e = get_exception()
module.fail_json(msg="rmtree failed: %s" % str(e))
else:
try:
- os.unlink(path)
+ os.unlink(b_path)
except Exception:
e = get_exception()
module.fail_json(path=path, msg="unlinking failed: %s " % str(e))
@@ -269,11 +298,12 @@ def main():
if state_change:
if follow and prev_state == 'link':
# follow symlink and operate on original
- path = os.path.realpath(path)
- prev_state = get_state(path)
+ b_path = os.path.realpath(b_path)
+ path = to_native(b_path, errors='strict')
+ prev_state = get_state(b_path)
file_args['path'] = path
- if prev_state not in ['file','hard']:
+ if prev_state not in ('file', 'hard'):
# file is not absent and any other state is a conflict
module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state))
@@ -282,8 +312,9 @@ def main():
elif state == 'directory':
if follow and prev_state == 'link':
- path = os.path.realpath(path)
- prev_state = get_state(path)
+ b_path = os.path.realpath(b_path)
+ path = to_native(b_path, errors='strict')
+ prev_state = get_state(b_path)
if prev_state == 'absent':
if module.check_mode:
@@ -301,17 +332,18 @@ def main():
# Remove leading slash if we're creating a relative path
if not os.path.isabs(path):
curpath = curpath.lstrip('/')
- if not os.path.exists(curpath):
+ b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
+ if not os.path.exists(b_curpath):
try:
- os.mkdir(curpath)
+ os.mkdir(b_curpath)
except OSError:
ex = get_exception()
# Possibly something else created the dir since the os.path.exists
# check above. As long as it's a dir, we don't need to error out.
- if not (ex.errno == errno.EEXIST and os.path.isdir(curpath)):
+ if not (ex.errno == errno.EEXIST and os.path.isdir(b_curpath)):
raise
tmp_file_args = file_args.copy()
- tmp_file_args['path']=curpath
+ tmp_file_args['path'] = curpath
changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff)
except Exception:
e = get_exception()
@@ -324,45 +356,47 @@ def main():
changed = module.set_fs_attributes_if_different(file_args, changed, diff)
if recurse:
- changed |= recursive_set_attributes(module, file_args['path'], follow, file_args)
+ changed |= recursive_set_attributes(module, to_bytes(file_args['path'], errors='surrogate_or_strict'), follow, file_args)
module.exit_json(path=path, changed=changed, diff=diff)
- elif state in ['link','hard']:
+ elif state in ('link', 'hard'):
- if os.path.isdir(path) and not os.path.islink(path):
+ if os.path.isdir(b_path) and not os.path.islink(b_path):
relpath = path
else:
- relpath = os.path.dirname(path)
+ b_relpath = os.path.dirname(b_path)
+ relpath = to_native(b_relpath, errors='strict')
absrc = os.path.join(relpath, src)
- if not os.path.exists(absrc) and not force:
+ b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
+ if not os.path.exists(b_absrc) and not force:
module.fail_json(path=path, src=src, msg='src file does not exist, use "force=yes" if you really want to create the link: %s' % absrc)
if state == 'hard':
- if not os.path.isabs(src):
+ if not os.path.isabs(b_src):
module.fail_json(msg="absolute paths are required")
elif prev_state == 'directory':
if not force:
module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, path))
- elif len(os.listdir(path)) > 0:
+ elif len(os.listdir(b_path)) > 0:
# refuse to replace a directory that has files in it
module.fail_json(path=path, msg='the directory %s is not empty, refusing to convert it' % path)
- elif prev_state in ['file', 'hard'] and not force:
+ elif prev_state in ('file', 'hard') and not force:
module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, path))
if prev_state == 'absent':
changed = True
elif prev_state == 'link':
- old_src = os.readlink(path)
- if old_src != src:
+ b_old_src = os.readlink(b_path)
+ if b_old_src != b_src:
changed = True
elif prev_state == 'hard':
- if not (state == 'hard' and os.stat(path).st_ino == os.stat(src).st_ino):
+ if not (state == 'hard' and os.stat(b_path).st_ino == os.stat(b_src).st_ino):
changed = True
if not force:
module.fail_json(dest=path, src=src, msg='Cannot link, different hard link exists at destination')
- elif prev_state in ['file', 'directory']:
+ elif prev_state in ('file', 'directory'):
changed = True
if not force:
module.fail_json(dest=path, src=src, msg='Cannot link, %s exists at destination' % prev_state)
@@ -372,31 +406,33 @@ def main():
if changed and not module.check_mode:
if prev_state != 'absent':
# try to replace atomically
- tmppath = '/'.join([os.path.dirname(path), ".%s.%s.tmp" % (os.getpid(),time.time())])
+ b_tmppath = to_bytes(os.path.sep).join(
+ [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
+ )
try:
if prev_state == 'directory' and (state == 'hard' or state == 'link'):
- os.rmdir(path)
+ os.rmdir(b_path)
if state == 'hard':
- os.link(src,tmppath)
+ os.link(b_src, b_tmppath)
else:
- os.symlink(src, tmppath)
- os.rename(tmppath, path)
+ os.symlink(b_src, b_tmppath)
+ os.rename(b_tmppath, b_path)
except OSError:
e = get_exception()
- if os.path.exists(tmppath):
- os.unlink(tmppath)
- module.fail_json(path=path, msg='Error while replacing: %s' % str(e))
+ if os.path.exists(b_tmppath):
+ os.unlink(b_tmppath)
+ module.fail_json(path=path, msg='Error while replacing: %s' % to_native(e, nonstring='simplerepr'))
else:
try:
if state == 'hard':
- os.link(src,path)
+ os.link(b_src, b_path)
else:
- os.symlink(src, path)
+ os.symlink(b_src, b_path)
except OSError:
e = get_exception()
- module.fail_json(path=path, msg='Error while linking: %s' % str(e))
+ module.fail_json(path=path, msg='Error while linking: %s' % to_native(e, nonstring='simplerepr'))
- if module.check_mode and not os.path.exists(path):
+ if module.check_mode and not os.path.exists(b_path):
module.exit_json(dest=path, src=src, changed=changed, diff=diff)
changed = module.set_fs_attributes_if_different(file_args, changed, diff)
@@ -407,16 +443,16 @@ def main():
if prev_state == 'absent':
try:
- open(path, 'w').close()
+ open(b_path, 'wb').close()
except OSError:
e = get_exception()
- module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e))
- elif prev_state in ['file', 'directory', 'hard']:
+ module.fail_json(path=path, msg='Error, could not touch target: %s' % to_native(e, nonstring='simplerepr'))
+ elif prev_state in ('file', 'directory', 'hard'):
try:
- os.utime(path, None)
+ os.utime(b_path, None)
except OSError:
e = get_exception()
- module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e))
+ module.fail_json(path=path, msg='Error while touching existing target: %s' % to_native(e, nonstring='simplerepr'))
else:
module.fail_json(msg='Cannot touch other than files, directories, and hardlinks (%s is %s)' % (path, prev_state))
try:
@@ -428,15 +464,12 @@ def main():
# somewhere in basic.py
if prev_state == 'absent':
# If we just created the file we can safely remove it
- os.remove(path)
+ os.remove(b_path)
raise e
module.exit_json(dest=path, changed=True, diff=diff)
module.fail_json(path=path, msg='unexpected position reached')
-# import module snippets
-from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
-
diff --git a/files/find.py b/files/find.py
index 88dc8d382ae..e2988aa36e9 100644
--- a/files/find.py
+++ b/files/find.py
@@ -26,6 +26,10 @@
import time
import re
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: find
@@ -34,7 +38,7 @@
short_description: return a list of files based on specific criteria
requirements: []
description:
- - Return a list files based on specific criteria. Multiple criteria are AND'd together.
+ - Return a list of files based on specific criteria. Multiple criteria are AND'd together.
options:
age:
required: false
@@ -48,7 +52,7 @@
required: false
default: '*'
description:
- - One or more (shell or regex) patterns, which type is controled by C(use_regex) option.
+ - One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
- The patterns restrict the list of files to be returned to those whose basenames match at
least one of the patterns specified. Multiple patterns can be specified using a list.
aliases: ['pattern']
@@ -66,7 +70,8 @@
required: false
description:
- Type of file to select
- choices: [ "file", "directory" ]
+ - The 'link' and 'any' choices were added in version 2.3
+ choices: [ "file", "directory", "link", "any" ]
default: "file"
recurse:
required: false
@@ -118,19 +123,37 @@
EXAMPLES = '''
# Recursively find /tmp files older than 2 days
-- find: paths="/tmp" age="2d" recurse=yes
+- find:
+ paths: "/tmp"
+ age: "2d"
+ recurse: yes
# Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
-- find: paths="/tmp" age="4w" size="1m" recurse=yes
+- find:
+ paths: "/tmp"
+ age: "4w"
+ size: "1m"
+ recurse: yes
# Recursively find /var/tmp files with last access time greater than 3600 seconds
-- find: paths="/var/tmp" age="3600" age_stamp=atime recurse=yes
+- find:
+ paths: "/var/tmp"
+ age: "3600"
+ age_stamp: atime
+ recurse: yes
# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
-- find: paths="/var/tmp" patterns="*.old,*.log.gz" size="10m"
+- find:
+ paths: "/var/tmp"
+ patterns: "*.old,*.log.gz"
+ size: "10m"
# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
-- find: paths="/var/tmp" patterns="^.*?\.(?:old|log\.gz)$" size="10m" use_regex=True
+- find:
+ paths: "/var/tmp"
+ patterns: "^.*?\.(?:old|log\.gz)$"
+ size: "10m"
+ use_regex: True
'''
RETURN = '''
@@ -139,13 +162,13 @@
returned: success
type: list of dictionaries
sample: [
- { path="/var/tmp/test1",
- mode=0644,
- ...,
- checksum=16fac7be61a6e4591a33ef4b729c5c3302307523
+ { path: "/var/tmp/test1",
+ mode: "0644",
+ "...": "...",
+ checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
},
- { path="/var/tmp/test2",
- ...
+ { path: "/var/tmp/test2",
+ "...": "..."
},
]
matched:
@@ -257,7 +280,7 @@ def main():
paths = dict(required=True, aliases=['name','path'], type='list'),
patterns = dict(default=['*'], type='list', aliases=['pattern']),
contains = dict(default=None, type='str'),
- file_type = dict(default="file", choices=['file', 'directory'], type='str'),
+ file_type = dict(default="file", choices=['file', 'directory', 'link', 'any'], type='str'),
age = dict(default=None, type='str'),
age_stamp = dict(default="mtime", choices=['atime','mtime','ctime'], type='str'),
size = dict(default=None, type='str'),
@@ -313,13 +336,17 @@ def main():
continue
try:
- st = os.stat(fsname)
+ st = os.lstat(fsname)
except:
msg+="%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
continue
r = {'path': fsname}
- if stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
+ if params['file_type'] == 'any':
+ if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
+ r.update(statinfo(st))
+ filelist.append(r)
+ elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
r.update(statinfo(st))
@@ -336,6 +363,11 @@ def main():
r['checksum'] = module.sha1(fsname)
filelist.append(r)
+ elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
+ if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
+ r.update(statinfo(st))
+ filelist.append(r)
+
if not params['recurse']:
break
else:
@@ -346,5 +378,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/files/ini_file.py b/files/ini_file.py
index af250f42405..f9f08ac5697 100644
--- a/files/ini_file.py
+++ b/files/ini_file.py
@@ -20,6 +20,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ini_file
@@ -77,14 +81,17 @@
required: false
default: false
version_added: "2.1"
+ create:
+ required: false
+ choices: [ "yes", "no" ]
+ default: "yes"
+ description:
+ - If set to 'no', the module will fail if the file does not already exist.
+ By default it will create the file if it is missing.
+ version_added: "2.2"
notes:
- While it is possible to add an I(option) without specifying a I(value), this makes
no sense.
- - A section named C(default) cannot be added by the module, but if it exists, individual
- options within the section can be updated. (This is a limitation of Python's I(ConfigParser).)
- Either use M(template) to create a base INI file with a C([default]) section, or use
- M(lineinfile) to add the missing line.
-requirements: [ ConfigParser ]
author:
- "Jan-Piet Mens (@jpmens)"
- "Ales Nosek (@noseka1)"
@@ -92,25 +99,31 @@
EXAMPLES = '''
# Ensure "fav=lemonade is in section "[drinks]" in specified file
-- ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes
+- ini_file:
+ dest: /etc/conf
+ section: drinks
+ option: fav
+ value: lemonade
+ mode: 0600
+ backup: yes
-- ini_file: dest=/etc/anotherconf
- section=drinks
- option=temperature
- value=cold
- backup=yes
+- ini_file:
+ dest: /etc/anotherconf
+ section: drinks
+ option: temperature
+ value: cold
+ backup: yes
'''
-import ConfigParser
-import sys
import os
+import re
# ==============================================================
# match_opt
def match_opt(option, line):
option = re.escape(option)
- return re.match('%s( |\t)*=' % option, line) \
+ return re.match(' *%s( |\t)*=' % option, line) \
or re.match('# *%s( |\t)*=' % option, line) \
or re.match('; *%s( |\t)*=' % option, line)
@@ -119,30 +132,43 @@ def match_opt(option, line):
def match_active_opt(option, line):
option = re.escape(option)
- return re.match('%s( |\t)*=' % option, line)
+ return re.match(' *%s( |\t)*=' % option, line)
# ==============================================================
# do_ini
-def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False, no_extra_spaces=False):
+def do_ini(module, filename, section=None, option=None, value=None,
+ state='present', backup=False, no_extra_spaces=False, create=False):
+ diff = {'before': '',
+ 'after': '',
+ 'before_header': '%s (content)' % filename,
+ 'after_header': '%s (content)' % filename}
if not os.path.exists(filename):
- try:
- open(filename,'w').close()
- except:
- module.fail_json(msg="Destination file %s not writable" % filename)
- ini_file = open(filename, 'r')
- try:
- ini_lines = ini_file.readlines()
- # append a fake section line to simplify the logic
- ini_lines.append('[')
- finally:
- ini_file.close()
+ if not create:
+ module.fail_json(rc=257, msg='Destination %s does not exist !' % filename)
+ destpath = os.path.dirname(filename)
+ if not os.path.exists(destpath) and not module.check_mode:
+ os.makedirs(destpath)
+ ini_lines = []
+ else:
+ ini_file = open(filename, 'r')
+ try:
+ ini_lines = ini_file.readlines()
+ finally:
+ ini_file.close()
+
+ if module._diff:
+ diff['before'] = ''.join(ini_lines)
+
+ # append a fake section line to simplify the logic
+ ini_lines.append('[')
within_section = not section
section_start = 0
changed = False
+ msg = 'OK'
if no_extra_spaces:
assignment_format = '%s=%s\n'
else:
@@ -156,11 +182,17 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese
if within_section:
if state == 'present':
# insert missing option line at the end of the section
- ini_lines.insert(index, assignment_format % (option, value))
- changed = True
+ for i in range(index, 0, -1):
+ # search backwards for previous non-blank or non-comment line
+ if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]):
+ ini_lines.insert(i, assignment_format % (option, value))
+ msg = 'option added'
+ changed = True
+ break
elif state == 'absent' and not option:
# remove the entire section
del ini_lines[section_start:index]
+ msg = 'section removed'
changed = True
break
else:
@@ -170,9 +202,11 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese
if match_opt(option, line):
newline = assignment_format % (option, value)
changed = ini_lines[index] != newline
+ if changed:
+ msg = 'option changed'
ini_lines[index] = newline
if changed:
- # remove all possible option occurences from the rest of the section
+ # remove all possible option occurrences from the rest of the section
index = index + 1
while index < len(ini_lines):
line = ini_lines[index]
@@ -183,11 +217,12 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese
else:
index = index + 1
break
- else:
- # comment out the existing option line
+ elif state == 'absent':
+ # delete the existing line
if match_active_opt(option, line):
- ini_lines[index] = '#%s' % ini_lines[index]
+ del ini_lines[index]
changed = True
+ msg = 'option changed'
break
# remove the fake section line
@@ -197,18 +232,22 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese
ini_lines.append('[%s]\n' % section)
ini_lines.append(assignment_format % (option, value))
changed = True
+ msg = 'section and option added'
+ if module._diff:
+ diff['after'] = ''.join(ini_lines)
+ backup_file = None
if changed and not module.check_mode:
if backup:
- module.backup_local(filename)
+ backup_file = module.backup_local(filename)
ini_file = open(filename, 'w')
try:
ini_file.writelines(ini_lines)
finally:
ini_file.close()
- return changed
+ return (changed, backup_file, diff, msg)
# ==============================================================
# main
@@ -223,14 +262,13 @@ def main():
value = dict(required=False),
backup = dict(default='no', type='bool'),
state = dict(default='present', choices=['present', 'absent']),
- no_extra_spaces = dict(required=False, default=False, type='bool')
+ no_extra_spaces = dict(required=False, default=False, type='bool'),
+ create=dict(default=True, type='bool')
),
add_file_common_args = True,
supports_check_mode = True
)
- info = dict()
-
dest = os.path.expanduser(module.params['dest'])
section = module.params['section']
option = module.params['option']
@@ -238,14 +276,20 @@ def main():
state = module.params['state']
backup = module.params['backup']
no_extra_spaces = module.params['no_extra_spaces']
+ create = module.params['create']
+
+ (changed,backup_file,diff,msg) = do_ini(module, dest, section, option, value, state, backup, no_extra_spaces, create)
- changed = do_ini(module, dest, section, option, value, state, backup, no_extra_spaces)
+ if not module.check_mode and os.path.exists(dest):
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
- file_args = module.load_file_common_arguments(module.params)
- changed = module.set_fs_attributes_if_different(file_args, changed)
+ results = { 'changed': changed, 'msg': msg, 'dest': dest, 'diff': diff }
+ if backup_file is not None:
+ results['backup_file'] = backup_file
# Mission complete
- module.exit_json(dest=dest, changed=changed, msg="OK")
+ module.exit_json(**results)
# import module snippets
from ansible.module_utils.basic import *
diff --git a/files/lineinfile.py b/files/lineinfile.py
index 61d1f35310e..ba7f4a3cc16 100644
--- a/files/lineinfile.py
+++ b/files/lineinfile.py
@@ -19,6 +19,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: lineinfile
@@ -120,26 +124,61 @@
"""
EXAMPLES = r"""
-- lineinfile: dest=/etc/selinux/config regexp=^SELINUX= line=SELINUX=enforcing
-
-- lineinfile: dest=/etc/sudoers state=absent regexp="^%wheel"
-
-- lineinfile: dest=/etc/hosts regexp='^127\.0\.0\.1' line='127.0.0.1 localhost' owner=root group=root mode=0644
-
-- lineinfile: dest=/etc/httpd/conf/httpd.conf regexp="^Listen " insertafter="^#Listen " line="Listen 8080"
-
-- lineinfile: dest=/etc/services regexp="^# port for http" insertbefore="^www.*80/tcp" line="# port for http by default"
+- lineinfile:
+ dest: /etc/selinux/config
+ regexp: '^SELINUX='
+ line: 'SELINUX=enforcing'
+
+- lineinfile:
+ dest: /etc/sudoers
+ state: absent
+ regexp: '^%wheel'
+
+- lineinfile:
+ dest: /etc/hosts
+ regexp: '^127\.0\.0\.1'
+ line: '127.0.0.1 localhost'
+ owner: root
+ group: root
+ mode: 0644
+
+- lineinfile:
+ dest: /etc/httpd/conf/httpd.conf
+ regexp: '^Listen '
+ insertafter: '^#Listen '
+ line: 'Listen 8080'
+
+- lineinfile:
+ dest: /etc/services
+ regexp: '^# port for http'
+ insertbefore: '^www.*80/tcp'
+ line: '# port for http by default'
# Add a line to a file if it does not exist, without passing regexp
-- lineinfile: dest=/tmp/testfile line="192.168.1.99 foo.lab.net foo"
+- lineinfile:
+ dest: /tmp/testfile
+ line: '192.168.1.99 foo.lab.net foo'
# Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs.
-- lineinfile: "dest=/etc/sudoers state=present regexp='^%wheel' line='%wheel ALL=(ALL) NOPASSWD: ALL'"
-
-- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes
+- lineinfile:
+ dest: /etc/sudoers
+ state: present
+ regexp: '^%wheel'
+ line: '%wheel ALL=(ALL) NOPASSWD: ALL'
+
+- lineinfile:
+ dest: /opt/jboss-as/bin/standalone.conf
+ regexp: '^(.*)Xms(\d+)m(.*)$'
+ line: '\1Xms${xms}m\3'
+ backrefs: yes
# Validate the sudoers file before saving
-- lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'
+- lineinfile:
+ dest: /etc/sudoers
+ state: present
+ regexp: '^%ADMIN ALL='
+ line: '%ADMIN ALL=(ALL) NOPASSWD: ALL'
+ validate: 'visudo -cf %s'
"""
import re
@@ -164,13 +203,15 @@ def write_changes(module, b_lines, dest):
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
- (rc, out, err) = module.run_command(to_bytes(validate % tmpfile))
+ (rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict'))
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc, err))
if valid:
- module.atomic_move(tmpfile, os.path.realpath(dest), unsafe_writes=module.params['unsafe_writes'])
+ module.atomic_move(tmpfile,
+ to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')), errors='surrogate_or_strict'),
+ unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message, diff):
@@ -194,7 +235,7 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
'before_header': '%s (content)' % dest,
'after_header': '%s (content)' % dest}
- b_dest = to_bytes(dest)
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
if not os.path.exists(b_dest):
if not create:
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
@@ -211,12 +252,12 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
diff['before'] = to_native(b('').join(b_lines))
if regexp is not None:
- bre_m = re.compile(to_bytes(regexp))
+ bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
if insertafter not in (None, 'BOF', 'EOF'):
- bre_ins = re.compile(to_bytes(insertafter))
+ bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
elif insertbefore not in (None, 'BOF'):
- bre_ins = re.compile(to_bytes(insertbefore))
+ bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
else:
bre_ins = None
@@ -224,7 +265,7 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
# index[1] is the line num where insertafter/inserbefore has been found
index = [-1, -1]
m = None
- b_line = to_bytes(line)
+ b_line = to_bytes(line, errors='surrogate_or_strict')
for lineno, b_cur_line in enumerate(b_lines):
if regexp is not None:
match_found = bre_m.search(b_cur_line)
@@ -244,7 +285,7 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
msg = ''
changed = False
# Regexp matched a line in the file
- b_linesep = to_bytes(os.linesep)
+ b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict')
if index[0] != -1:
if backrefs:
b_new_line = m.expand(b_line)
@@ -310,7 +351,7 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
def absent(module, dest, regexp, line, backup):
- b_dest = to_bytes(dest)
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
if not os.path.exists(b_dest):
module.exit_json(changed=False, msg="file not present")
@@ -328,10 +369,10 @@ def absent(module, dest, regexp, line, backup):
diff['before'] = to_native(b('').join(b_lines))
if regexp is not None:
- bre_c = re.compile(to_bytes(regexp))
+ bre_c = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
found = []
- b_line = to_bytes(line)
+ b_line = to_bytes(line, errors='surrogate_or_strict')
def matcher(b_cur_line):
if regexp is not None:
match_found = bre_c.search(b_cur_line)
@@ -392,7 +433,7 @@ def main():
backrefs = params['backrefs']
dest = params['dest']
- b_dest = to_bytes(dest)
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.isdir(b_dest):
module.fail_json(rc=256, msg='Destination %s is a directory !' % dest)
diff --git a/files/replace.py b/files/replace.py
index fa7058d70f9..85d6d91e1f7 100644
--- a/files/replace.py
+++ b/files/replace.py
@@ -22,6 +22,10 @@
import os
import tempfile
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: replace
@@ -77,11 +81,24 @@
"""
EXAMPLES = r"""
-- replace: dest=/etc/hosts regexp='(\s+)old\.host\.name(\s+.*)?$' replace='\1new.host.name\2' backup=yes
-
-- replace: dest=/home/jdoe/.ssh/known_hosts regexp='^old\.host\.name[^\n]*\n' owner=jdoe group=jdoe mode=644
-
-- replace: dest=/etc/apache/ports regexp='^(NameVirtualHost|Listen)\s+80\s*$' replace='\1 127.0.0.1:8080' validate='/usr/sbin/apache2ctl -f %s -t'
+- replace:
+ dest: /etc/hosts
+ regexp: '(\s+)old\.host\.name(\s+.*)?$'
+ replace: '\1new.host.name\2'
+ backup: yes
+
+- replace:
+ dest: /home/jdoe/.ssh/known_hosts
+ regexp: '^old\.host\.name[^\n]*\n'
+ owner: jdoe
+ group: jdoe
+ mode: 0644
+
+- replace:
+ dest: /etc/apache/ports
+ regexp: '^(NameVirtualHost|Listen)\s+80\s*$'
+ replace: '\1 127.0.0.1:8080'
+ validate: '/usr/sbin/apache2ctl -f %s -t'
"""
def write_changes(module,contents,dest):
@@ -131,6 +148,7 @@ def main():
params = module.params
dest = os.path.expanduser(params['dest'])
+ res_args = dict()
if os.path.isdir(dest):
module.fail_json(rc=256, msg='Destination %s is a directory !' % dest)
@@ -142,12 +160,6 @@ def main():
contents = f.read()
f.close()
- if module._diff:
- diff = {
- 'before_header': dest,
- 'before': contents,
- }
-
mre = re.compile(params['regexp'], re.MULTILINE)
result = re.subn(mre, params['replace'], contents, 0)
@@ -155,22 +167,25 @@ def main():
msg = '%s replacements made' % result[1]
changed = True
if module._diff:
- diff['after_header'] = dest
- diff['after'] = result[0]
+ res_args['diff'] = {
+ 'before_header': dest,
+ 'before': contents,
+ 'after_header': dest,
+ 'after': result[0],
+ }
else:
msg = ''
changed = False
- diff = dict()
if changed and not module.check_mode:
if params['backup'] and os.path.exists(dest):
- module.backup_local(dest)
+ res_args['backup_file'] = module.backup_local(dest)
if params['follow'] and os.path.islink(dest):
dest = os.path.realpath(dest)
write_changes(module, result[0], dest)
- msg, changed = check_file_attrs(module, changed, msg)
- module.exit_json(changed=changed, msg=msg, diff=diff)
+ res_args['msg'], res_args['changed'] = check_file_attrs(module, changed, msg)
+ module.exit_json(**res_args)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
diff --git a/files/stat.py b/files/stat.py
index f28ddcc0cc1..8cccae1d685 100644
--- a/files/stat.py
+++ b/files/stat.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: stat
@@ -53,25 +57,35 @@
default: sha1
aliases: [ 'checksum_algo', 'checksum' ]
version_added: "2.0"
- mime:
+ get_mime:
description:
- Use file magic and return data about the nature of the file. this uses
the 'file' utility found on most Linux/Unix systems.
- This will add both `mime_type` and 'charset' fields to the return, if possible.
+ - In 2.3 this option changed from 'mime' to 'get_mime' and the default changed to 'Yes'
required: false
choices: [ Yes, No ]
- default: No
+ default: Yes
version_added: "2.1"
- aliases: [ 'mime_type', 'mime-type' ]
+ aliases: [ 'mime', 'mime_type', 'mime-type' ]
+ get_attributes:
+ description:
+ - Get file attributes using lsattr tool if present.
+ required: false
+ default: True
+ version_added: "2.3"
+ aliases: [ 'attributes', 'attr' ]
author: "Bruce Pennypacker (@bpennypacker)"
'''
EXAMPLES = '''
# Obtain the stats of /etc/foo.conf, and check that the file still belongs
# to 'root'. Fail otherwise.
-- stat: path=/etc/foo.conf
+- stat:
+ path: /etc/foo.conf
register: st
-- fail: msg="Whoops! file ownership has changed"
+- fail:
+ msg: "Whoops! file ownership has changed"
when: st.stat.pw_name != 'root'
# Determine if a path exists and is a symlink. Note that if the path does
@@ -79,35 +93,50 @@
# therefore, we must test whether it is defined.
# Run this to understand the structure, the skipped ones do not pass the
# check performed by 'when'
-- stat: path=/path/to/something
+- stat:
+ path: /path/to/something
register: sym
-- debug: msg="islnk isn't defined (path doesn't exist)"
+
+- debug:
+ msg: "islnk isn't defined (path doesn't exist)"
when: sym.stat.islnk is not defined
-- debug: msg="islnk is defined (path must exist)"
+
+- debug:
+ msg: "islnk is defined (path must exist)"
when: sym.stat.islnk is defined
-- debug: msg="Path exists and is a symlink"
+
+- debug:
+ msg: "Path exists and is a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk
-- debug: msg="Path exists and isn't a symlink"
+
+- debug:
+ msg: "Path exists and isn't a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk == False
# Determine if a path exists and is a directory. Note that we need to test
# both that p.stat.isdir actually exists, and also that it's set to true.
-- stat: path=/path/to/something
+- stat:
+ path: /path/to/something
register: p
-- debug: msg="Path exists and is a directory"
+- debug:
+ msg: "Path exists and is a directory"
when: p.stat.isdir is defined and p.stat.isdir
# Don't do md5 checksum
-- stat: path=/path/to/myhugefile get_md5=no
+- stat:
+ path: /path/to/myhugefile
+ get_md5: no
# Use sha256 to calculate checksum
-- stat: path=/path/to/something checksum_algorithm=sha256
+- stat:
+ path: /path/to/something
+ checksum_algorithm: sha256
'''
RETURN = '''
stat:
- description: dictionary containing all the stat data
+ description: dictionary containing all the stat data, some platforms might add additional fields
returned: success
type: dictionary
contains:
@@ -172,7 +201,7 @@
type: int
sample: 1003
size:
- description: Size in bytes for a plain file, ammount of data for some special files
+ description: Size in bytes for a plain file, amount of data for some special files
returned: success, path exists and user can read stats
type: int
sample: 203
@@ -307,16 +336,25 @@
returned: success, path exists and user can read the path
type: boolean
sample: False
+ version_added: 2.2
writeable:
description: Tells you if the invoking user has the right to write the path
returned: success, path exists and user can write the path
type: boolean
sample: False
+ version_added: 2.2
executable:
description: Tells you if the invoking user has the execute the path
returned: success, path exists and user can execute the path
type: boolean
sample: False
+ version_added: 2.2
+ attributes:
+ description: list of file attributes
+ returned: success, path exists and user can execute the path
+ type: list
+ sample: [ immutable, extent ]
+ version_added: 2.3
'''
import errno
@@ -326,12 +364,11 @@
import stat
# import module snippets
-from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import AnsibleModule, format_attributes
from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils._text import to_bytes
-
-def format_output(module, path, st, follow, get_md5, get_checksum,
- checksum_algorithm, mimetype=None, charset=None):
+def format_output(module, path, st):
mode = st.st_mode
# back to ansible
@@ -366,37 +403,30 @@ def format_output(module, path, st, follow, get_md5, get_checksum,
xoth=bool(mode & stat.S_IXOTH),
isuid=bool(mode & stat.S_ISUID),
isgid=bool(mode & stat.S_ISGID),
- readable=os.access(path, os.R_OK),
- writeable=os.access(path, os.W_OK),
- excutable=os.access(path, os.X_OK),
- )
-
- if stat.S_ISLNK(mode):
- output['lnk_source'] = os.path.realpath(path)
-
- if stat.S_ISREG(mode) and get_md5 and os.access(path, os.R_OK):
- # Will fail on FIPS-140 compliant systems
- try:
- output['md5'] = module.md5(path)
- except ValueError:
- output['md5'] = None
-
- if stat.S_ISREG(mode) and get_checksum and os.access(path, os.R_OK):
- output['checksum'] = module.digest_from_file(path, checksum_algorithm)
+ )
- try:
- pw = pwd.getpwuid(st.st_uid)
+ # Platform dependant flags:
+ for other in [
+ # Some Linux
+ ('st_blocks','blocks'),
+ ('st_blksize', 'block_size'),
+ ('st_rdev','device_type'),
+ ('st_flags', 'flags'),
+ # Some Berkeley based
+ ('st_gen', 'generation'),
+ ('st_birthtime', 'birthtime'),
+ # RISCOS
+ ('st_ftype', 'file_type'),
+ ('st_attrs', 'attrs'),
+ ('st_obtype', 'object_type'),
+ # OS X
+ ('st_rsize', 'real_size'),
+ ('st_creator', 'creator'),
+ ('st_type', 'file_type'),
+ ]:
+ if hasattr(st, other[0]):
+ output[other[1]] = getattr(st, other[0])
- output['pw_name'] = pw.pw_name
-
- grp_info = grp.getgrgid(st.st_gid)
- output['gr_name'] = grp_info.gr_name
- except:
- pass
-
- if not (mimetype is None and charset is None):
- output['mime_type'] = mimetype
- output['charset'] = charset
return output
@@ -408,26 +438,30 @@ def main():
follow=dict(default='no', type='bool'),
get_md5=dict(default='yes', type='bool'),
get_checksum=dict(default='yes', type='bool'),
+ get_mime=dict(default=True, type='bool', aliases=['mime', 'mime_type', 'mime-type']),
+ get_attributes=dict(default=True, type='bool', aliases=['attributes', 'attr']),
checksum_algorithm=dict(default='sha1', type='str',
choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
aliases=['checksum_algo', 'checksum']),
- mime=dict(default=False, type='bool', aliases=['mime_type', 'mime-type']),
),
supports_check_mode=True
)
path = module.params.get('path')
+ b_path = to_bytes(path, errors='surrogate_or_strict')
follow = module.params.get('follow')
- get_mime = module.params.get('mime')
+ get_mime = module.params.get('get_mime')
+ get_attr = module.params.get('get_attributes')
get_md5 = module.params.get('get_md5')
get_checksum = module.params.get('get_checksum')
checksum_algorithm = module.params.get('checksum_algorithm')
+ # main stat data
try:
if follow:
- st = os.stat(path)
+ st = os.stat(b_path)
else:
- st = os.lstat(path)
+ st = os.lstat(b_path)
except OSError:
e = get_exception()
if e.errno == errno.ENOENT:
@@ -436,25 +470,65 @@ def main():
module.fail_json(msg=e.strerror)
- mimetype = None
- charset = None
+ # process base results
+ output = format_output(module, path, st)
+
+ # resolved permissions
+ for perm in [('readable', os.R_OK), ('writeable', os.W_OK), ('executable', os.X_OK)]:
+ output[perm[0]] = os.access(path, perm[1])
+
+ # symlink info
+ if output.get('islnk'):
+ output['lnk_source'] = os.path.realpath(path)
+
+ try: # user data
+ pw = pwd.getpwuid(st.st_uid)
+ output['pw_name'] = pw.pw_name
+ except:
+ pass
+
+ try: # group data
+ grp_info = grp.getgrgid(st.st_gid)
+ output['gr_name'] = grp_info.gr_name
+ except:
+ pass
+
+ # checksums
+ if output.get('isreg') and output.get('readable'):
+ if get_md5:
+ # Will fail on FIPS-140 compliant systems
+ try:
+ output['md5'] = module.md5(path)
+ except ValueError:
+ output['md5'] = None
+
+ if get_checksum:
+ output['checksum'] = module.digest_from_file(path, checksum_algorithm)
+
+ # try to get mime data if requested
if get_mime:
- mimetype = 'unknown'
- charset = 'unknown'
-
- filecmd = [module.get_bin_path('file', True), '-i', path]
- try:
- rc, out, err = module.run_command(filecmd)
- if rc == 0:
- mimetype, charset = out.split(':')[1].split(';')
- mimetype = mimetype.strip()
- charset = charset.split('=')[1].strip()
- except:
- pass
-
- output = format_output(module, path, st, follow, get_md5, get_checksum,
- checksum_algorithm, mimetype=mimetype,
- charset=charset)
+ output['mimetype'] = output['charset'] = 'unknown'
+ mimecmd = module.get_bin_path('file')
+ if mimecmd:
+ mimecmd = [mimecmd, '-i', path]
+ try:
+ rc, out, err = module.run_command(mimecmd)
+ if rc == 0:
+ mimetype, charset = out.split(':')[1].split(';')
+ output['mimetype'] = mimetype.strip()
+ output['charset'] = charset.split('=')[1].strip()
+ except:
+ pass
+
+ # try to get attr data
+ if get_attr:
+ output['version'] = None
+ output['attributes'] = []
+ output['attr_flags'] = ''
+ out = module.get_file_attributes(path)
+ for x in ('version', 'attributes', 'attr_flags'):
+ if x in out:
+ output[x] = out[x]
module.exit_json(changed=False, stat=output)
diff --git a/files/synchronize.py b/files/synchronize.py
index 9a053c74d75..4a7933adf1d 100644
--- a/files/synchronize.py
+++ b/files/synchronize.py
@@ -16,13 +16,17 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: synchronize
version_added: "1.4"
-short_description: Uses rsync to make synchronizing file paths in your playbooks quick and easy.
+short_description: A wrapper around rsync to make common tasks in your playbooks quick and easy.
description:
- - C(synchronize) is a wrapper around the rsync command, meant to make common tasks with rsync easier. It is run and originates on the local host where Ansible is being run. Of course, you could just use the command action to call rsync yourself, but you also have to add a fair number of boilerplate options and host facts. You `still` may need to call rsync directly via C(command) or C(shell) depending on your use case. C(synchronize) does not provide access to the full power of rsync, but does make most invocations easier to follow.
+ - C(synchronize) is a wrapper around rsync to make common tasks in your playbooks quick and easy. It is run and originates on the local host where Ansible is being run. Of course, you could just use the C(command) action to call rsync yourself, but you also have to add a fair number of boilerplate options and host facts. C(synchronize) is not intended to provide access to the full power of rsync, but does make the most common invocations easier to implement. You `still` may need to call rsync directly via C(command) or C(shell) depending on your use case.
options:
src:
description:
@@ -130,7 +134,7 @@
required: false
rsync_timeout:
description:
- - Specify a --timeout for the rsync command in seconds.
+ - Specify a --timeout for the rsync command in seconds.
default: 0
required: false
set_remote_user:
@@ -174,71 +178,108 @@
- Expect that dest=~/x will be ~/x even if using sudo.
- Inspect the verbose output to validate the destination user/host/path
are what was expected.
- - To exclude files and directories from being synchronized, you may add
+ - To exclude files and directories from being synchronized, you may add
C(.rsync-filter) files to the source directory.
- rsync daemon must be up and running with correct permission when using
rsync protocol in source or destination path.
-
+ - The C(synchronize) module forces `--delay-updates` to avoid leaving a destination in a broken in-between state if the underlying rsync process encounters an error. Those synchronizing large numbers of files that are willing to trade safety for performance should call rsync directly.
author: "Timothy Appnel (@tima)"
'''
EXAMPLES = '''
# Synchronization of src on the control machine to dest on the remote hosts
-synchronize: src=some/relative/path dest=/some/absolute/path
+- synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
# Synchronization using rsync protocol (push)
-synchronize: src=some/relative/path/ dest=rsync://somehost.com/path/
+- synchronize:
+ src: some/relative/path/
+ dest: rsync://somehost.com/path/
# Synchronization using rsync protocol (pull)
-synchronize: mode=pull src=rsync://somehost.com/path/ dest=/some/absolute/path/
+- synchronize:
+ mode: pull
+ src: rsync://somehost.com/path/
+ dest: /some/absolute/path/
# Synchronization using rsync protocol on delegate host (push)
-synchronize: >
- src=/some/absolute/path/ dest=rsync://somehost.com/path/
- delegate_to: delegate.host
+- synchronize:
+ src: /some/absolute/path/
+ dest: rsync://somehost.com/path/
+ delegate_to: delegate.host
# Synchronization using rsync protocol on delegate host (pull)
-synchronize: >
- mode=pull src=rsync://somehost.com/path/ dest=/some/absolute/path/
- delegate_to: delegate.host
+- synchronize:
+ mode: pull
+ src: rsync://somehost.com/path/
+ dest: /some/absolute/path/
+ delegate_to: delegate.host
# Synchronization without any --archive options enabled
-synchronize: src=some/relative/path dest=/some/absolute/path archive=no
+- synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ archive: no
# Synchronization with --archive options enabled except for --recursive
-synchronize: src=some/relative/path dest=/some/absolute/path recursive=no
+- synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ recursive: no
# Synchronization with --archive options enabled except for --times, with --checksum option enabled
-synchronize: src=some/relative/path dest=/some/absolute/path checksum=yes times=no
+- synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ checksum: yes
+ times: no
# Synchronization without --archive options enabled except use --links
-synchronize: src=some/relative/path dest=/some/absolute/path archive=no links=yes
+- synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ archive: no
+ links: yes
# Synchronization of two paths both on the control machine
-local_action: synchronize src=some/relative/path dest=/some/absolute/path
+- synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ delegate_to: localhost
# Synchronization of src on the inventory host to the dest on the localhost in pull mode
-synchronize: mode=pull src=some/relative/path dest=/some/absolute/path
+- synchronize:
+ mode: pull
+ src: some/relative/path
+ dest: /some/absolute/path
# Synchronization of src on delegate host to dest on the current inventory host.
-synchronize:
+- synchronize:
src: /first/absolute/path
dest: /second/absolute/path
-delegate_to: delegate.host
+ delegate_to: delegate.host
# Synchronize two directories on one remote host.
-synchronize:
+- synchronize:
src: /first/absolute/path
dest: /second/absolute/path
-delegate_to: "{{ inventory_hostname }}"
+ delegate_to: "{{ inventory_hostname }}"
# Synchronize and delete files in dest on the remote host that are not found in src of localhost.
-synchronize: src=some/relative/path dest=/some/absolute/path delete=yes recursive=yes
+- synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ delete: yes
+ recursive: yes
# Synchronize using an alternate rsync command
# This specific command is granted su privileges on the destination
-synchronize: src=some/relative/path dest=/some/absolute/path rsync_path="su -c rsync"
+- synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ rsync_path: "su -c rsync"
# Example .rsync-filter file in the source directory
- var # exclude any path whose last part is 'var'
@@ -246,9 +287,9 @@
+ /var/conf # include /var/conf even though it was previously excluded
# Synchronize passing in extra rsync options
-synchronize:
+- synchronize:
src: /tmp/helloworld
- dest: /var/www/helloword
+ dest: /var/www/helloworld
rsync_opts:
- "--no-motd"
- "--exclude=.git"
@@ -397,7 +438,7 @@ def main():
if private_key is None:
private_key = ''
else:
- private_key = '-i '+ private_key
+ private_key = '-i '+ private_key
ssh_opts = '-S none'
@@ -413,7 +454,7 @@ def main():
if not source.startswith('"rsync://') and not dest.startswith('"rsync://'):
# If the user specified a port value
# Note: The action plugin takes care of setting this to a port from
- # inventory if the user didn't specify an explict dest_port
+ # inventory if the user didn't specify an explicit dest_port
if dest_port is not None:
cmd += " --rsh 'ssh %s %s -o Port=%s'" % (private_key, ssh_opts, dest_port)
else:
@@ -433,9 +474,9 @@ def main():
# expand the paths
if '@' not in source:
- source = os.path.expanduser(source)
+ source = os.path.expanduser(source)
if '@' not in dest:
- dest = os.path.expanduser(dest)
+ dest = os.path.expanduser(dest)
cmd = ' '.join([cmd, source, dest])
cmdstr = cmd
@@ -446,7 +487,7 @@ def main():
changed = changed_marker in out
out_clean=out.replace(changed_marker,'')
out_lines=out_clean.split('\n')
- while '' in out_lines:
+ while '' in out_lines:
out_lines.remove('')
if module._diff:
diff = {'prepared': out_clean}
@@ -460,5 +501,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
-
+if __name__ == '__main__':
+ main()
diff --git a/files/template.py b/files/template.py
index 5ee903a778f..c603ec8f428 100644
--- a/files/template.py
+++ b/files/template.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: template
@@ -25,12 +29,12 @@
(U(http://jinja.pocoo.org/docs/)) - documentation on the template
formatting can be found in the Template Designer Documentation
(U(http://jinja.pocoo.org/docs/templates/)).
- - "Six additional variables can be used in templates: C(ansible_managed)
+ - "Six additional variables can be used in templates: C(ansible_managed)
(configurable via the C(defaults) section of C(ansible.cfg)) contains a string
which can be used to describe the template name, host, modification time of the
- template file and the owner uid, C(template_host) contains the node name of
+ template file and the owner uid, C(template_host) contains the node name of
the template's machine, C(template_uid) the owner, C(template_path) the
- absolute path of the template, C(template_fullpath) is the absolute path of the
+ absolute path of the template, C(template_fullpath) is the absolute path of the
template, and C(template_run_date) is the date that the template was rendered. Note that including
a string that uses a date in the template will result in the template being marked 'changed'
each time."
@@ -77,11 +81,24 @@
EXAMPLES = '''
# Example from Ansible Playbooks
-- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode=0644
+- template:
+ src: /mytemplates/foo.j2
+ dest: /etc/file.conf
+ owner: bin
+ group: wheel
+ mode: 0644
# The same example, but using symbolic modes equivalent to 0644
-- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode="u=rw,g=r,o=r"
+- template:
+ src: /mytemplates/foo.j2
+ dest: /etc/file.conf
+ owner: bin
+ group: wheel
+ mode: "u=rw,g=r,o=r"
# Copy a new "sudoers" file into place, after passing validation with visudo
-- template: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s'
+- template:
+ src: /mine/sudoers
+ dest: /etc/sudoers
+ validate: 'visudo -cf %s'
'''
diff --git a/files/unarchive.py b/files/unarchive.py
index 41f72bdd26b..c919d1f0ae8 100644
--- a/files/unarchive.py
+++ b/files/unarchive.py
@@ -21,6 +21,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: unarchive
@@ -114,13 +118,21 @@
EXAMPLES = '''
# Example from Ansible Playbooks
-- unarchive: src=foo.tgz dest=/var/lib/foo
+- unarchive:
+ src: foo.tgz
+ dest: /var/lib/foo
# Unarchive a file that is already on the remote machine
-- unarchive: src=/tmp/foo.zip dest=/usr/local/bin remote_src=yes
+- unarchive:
+ src: /tmp/foo.zip
+ dest: /usr/local/bin
+ remote_src: yes
# Unarchive a file that needs to be downloaded (added in 2.0)
-- unarchive: src=https://example.com/example.zip dest=/usr/local/bin remote_src=yes
+- unarchive:
+ src: "https://example.com/example.zip"
+ dest: /usr/local/bin
+ remote_src: yes
'''
import re
@@ -133,6 +145,7 @@
import binascii
import codecs
from zipfile import ZipFile, BadZipfile
+from ansible.module_utils._text import to_text
try: # python 3.3+
from shlex import quote
@@ -250,7 +263,7 @@ def files_in_archive(self, force_refresh=False):
try:
for member in archive.namelist():
if member not in self.excludes:
- self._files_in_archive.append(member)
+ self._files_in_archive.append(to_native(member))
except:
archive.close()
raise UnarchiveError('Unable to list files in the archive')
@@ -339,7 +352,12 @@ def is_unarchived(self):
if len(pcs[0]) != 7 and len(pcs[0]) != 10: continue
if len(pcs[6]) != 15: continue
- if pcs[0][0] not in 'dl-?' or not frozenset(pcs[0][1:]).issubset('rwxst-'):
+ # Possible entries:
+ # -rw-rws--- 1.9 unx 2802 t- defX 11-Aug-91 13:48 perms.2660
+ # -rw-a-- 1.0 hpf 5358 Tl i4:3 4-Dec-91 11:33 longfilename.hpfs
+ # -r--ahs 1.1 fat 4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF
+ # --w------- 1.0 mac 17357 bx i8:2 4-May-92 04:02 unzip.macr
+ if pcs[0][0] not in 'dl-?' or not frozenset(pcs[0][1:]).issubset('rwxstah-'):
continue
ztype = pcs[0][0]
@@ -347,7 +365,7 @@ def is_unarchived(self):
version = pcs[1]
ostype = pcs[2]
size = int(pcs[3])
- path = pcs[7]
+ path = to_text(pcs[7], errors='surrogate_or_strict')
# Skip excluded files
if path in self.excludes:
@@ -434,6 +452,7 @@ def is_unarchived(self):
elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
# Add to excluded files, ignore other changes
out += 'File %s is newer, excluding file\n' % path
+ self.excludes.append(path)
continue
else:
if timestamp != st.st_mtime:
@@ -539,24 +558,24 @@ def unarchive(self):
cmd = [ self.cmd_path, '-o', self.src ]
if self.opts:
cmd.extend(self.opts)
- if self.includes:
+ # NOTE: Including (changed) files as arguments is problematic (limits on command line/arguments)
+# if self.includes:
# NOTE: Command unzip has this strange behaviour where it expects quoted filenames to also be escaped
- cmd.extend(map(shell_escape, self.includes))
- # We don't need to handle excluded files, since we simply do not include them
-# if self.excludes:
-# cmd.extend([ '-x' ] + self.excludes ])
+# cmd.extend(map(shell_escape, self.includes))
+ if self.excludes:
+ cmd.extend([ '-x' ] + self.excludes)
cmd.extend([ '-d', self.dest ])
rc, out, err = self.module.run_command(cmd)
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
if not self.cmd_path:
- return False
+ return False, 'Command "unzip" not found.'
cmd = [ self.cmd_path, '-l', self.src ]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
- return True
- return False
+ return True, None
+ return False, 'Command "%s" could not handle archive.' % self.cmd_path
# class to handle gzipped tar files
@@ -579,6 +598,21 @@ def __init__(self, src, dest, file_args, module):
self.zipflag = '-z'
self._files_in_archive = []
+ if self.cmd_path:
+ self.tar_type = self._get_tar_type()
+ else:
+ self.tar_type = None
+
+ def _get_tar_type(self):
+ cmd = [self.cmd_path, '--version']
+ (rc, out, err) = self.module.run_command(cmd)
+ tar_type = None
+ if out.startswith('bsdtar'):
+ tar_type = 'bsd'
+ elif out.startswith('tar') and 'GNU' in out:
+ tar_type = 'gnu'
+ return tar_type
+
@property
def files_in_archive(self, force_refresh=False):
if self._files_in_archive and not force_refresh:
@@ -592,7 +626,7 @@ def files_in_archive(self, force_refresh=False):
if self.excludes:
cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ])
cmd.extend([ '-f', self.src ])
- rc, out, err = self.module.run_command(cmd)
+ rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
if rc != 0:
raise UnarchiveError('Unable to list files in the archive')
@@ -601,7 +635,7 @@ def files_in_archive(self, force_refresh=False):
# filename = filename.decode('string_escape')
filename = codecs.escape_decode(filename)[0]
if filename and filename not in self.excludes:
- self._files_in_archive.append(filename)
+ self._files_in_archive.append(to_native(filename))
return self._files_in_archive
def is_unarchived(self):
@@ -614,14 +648,12 @@ def is_unarchived(self):
cmd.append('--owner=' + quote(self.file_args['owner']))
if self.file_args['group']:
cmd.append('--group=' + quote(self.file_args['group']))
- if self.file_args['mode']:
- cmd.append('--mode=' + quote(self.file_args['mode']))
if self.module.params['keep_newer']:
cmd.append('--keep-newer-files')
if self.excludes:
cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ])
cmd.extend([ '-f', self.src ])
- rc, out, err = self.module.run_command(cmd)
+ rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
# Check whether the differences are in something that we're
# setting anyway
@@ -663,28 +695,29 @@ def unarchive(self):
cmd.append('--owner=' + quote(self.file_args['owner']))
if self.file_args['group']:
cmd.append('--group=' + quote(self.file_args['group']))
- if self.file_args['mode']:
- cmd.append('--mode=' + quote(self.file_args['mode']))
if self.module.params['keep_newer']:
cmd.append('--keep-newer-files')
if self.excludes:
cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ])
cmd.extend([ '-f', self.src ])
- rc, out, err = self.module.run_command(cmd, cwd=self.dest)
+ rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
if not self.cmd_path:
- return False
+ return False, 'Commands "gtar" and "tar" not found.'
+
+ if self.tar_type != 'gnu':
+ return False, 'Command "%s" detected as tar type %s. GNU tar required.' % (self.cmd_path, self.tar_type)
try:
if self.files_in_archive:
- return True
+ return True, None
except UnarchiveError:
- pass
+ return False, 'Command "%s" could not handle archive.' % self.cmd_path
# Errors and no files in archive assume that we weren't able to
# properly unarchive it
- return False
+ return False, 'Command "%s" found no files in archive.' % self.cmd_path
# class to handle tar files that aren't compressed
@@ -712,11 +745,15 @@ def __init__(self, src, dest, file_args, module):
# try handlers in order and return the one that works or bail if none work
def pick_handler(src, dest, file_args, module):
handlers = [ZipArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive]
+ reasons = set()
for handler in handlers:
obj = handler(src, dest, file_args, module)
- if obj.can_handle_archive():
+ (can_handle, reason) = obj.can_handle_archive()
+ if can_handle:
return obj
- module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed.' % src)
+ reasons.add(reason)
+ reason_msg = ' '.join(reasons)
+ module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed. %s' % (src, reason_msg))
def main():
@@ -741,9 +778,6 @@ def main():
supports_check_mode = True,
)
- # We screenscrape a huge amount of commands so use C locale anytime we do
- module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
-
src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest'])
copy = module.params['copy']
@@ -841,5 +875,7 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
+from ansible.module_utils._text import to_native
+
if __name__ == '__main__':
main()
diff --git a/files/xattr.py b/files/xattr.py
index 378665477cf..0ae74efbaa9 100644
--- a/files/xattr.py
+++ b/files/xattr.py
@@ -14,6 +14,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: xattr
@@ -63,16 +67,25 @@
EXAMPLES = '''
# Obtain the extended attributes of /etc/foo.conf
-- xattr: name=/etc/foo.conf
+- xattr:
+ name: /etc/foo.conf
# Sets the key 'foo' to value 'bar'
-- xattr: path=/etc/foo.conf key=user.foo value=bar
+- xattr:
+ path: /etc/foo.conf
+ key: user.foo
+ value: bar
# Removes the key 'foo'
-- xattr: name=/etc/foo.conf key=user.foo state=absent
+- xattr:
+ name: /etc/foo.conf
+ key: user.foo
+ state: absent
'''
import operator
+import re
+import os
def get_xattr_keys(module,path,follow):
cmd = [ module.get_bin_path('getfattr', True) ]
@@ -202,7 +215,7 @@ def main():
module.exit_json(changed=changed, msg=msg, xattr=res)
# import module snippets
-from ansible.module_utils.basic import *
-
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
diff --git a/inventory/add_host.py b/inventory/add_host.py
index ef01ed1051b..44ae5303cc3 100644
--- a/inventory/add_host.py
+++ b/inventory/add_host.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: add_host
@@ -37,20 +41,31 @@
notes:
- This module bypasses the play host loop and only runs once for all the hosts in the play, if you need it
to iterate use a with\_ directive.
-author:
+author:
- "Ansible Core Team"
- "Seth Vidal"
'''
EXAMPLES = '''
# add host to group 'just_created' with variable foo=42
-- add_host: name={{ ip_from_ec2 }} groups=just_created foo=42
+- add_host:
+ name: "{{ ip_from_ec2 }}"
+ groups: just_created
+ foo: 42
# add a host with a non-standard port local to your machines
-- add_host: name={{ new_ip }}:{{ new_port }}
+- add_host:
+ name: "{{ new_ip }}:{{ new_port }}"
+
+# add a host alias that we reach through a tunnel (Ansible <= 1.9)
+- add_host:
+ hostname: "{{ new_ip }}"
+ ansible_ssh_host: "{{ inventory_hostname }}"
+ ansible_ssh_port: "{{ new_port }}"
-# add a host alias that we reach through a tunnel
-- add_host: hostname={{ new_ip }}
- ansible_ssh_host={{ inventory_hostname }}
- ansible_ssh_port={{ new_port }}
+# add a host alias that we reach through a tunnel (Ansible >= 2.0)
+- add_host:
+ hostname: "{{ new_ip }}"
+ ansible_host: "{{ inventory_hostname }}"
+ ansible_port: "{{ new_port }}"
'''
diff --git a/inventory/group_by.py b/inventory/group_by.py
index 4bfd20206be..c7cb6a034ba 100644
--- a/inventory/group_by.py
+++ b/inventory/group_by.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: group_by
@@ -34,7 +38,10 @@
EXAMPLES = '''
# Create groups based on the machine architecture
-- group_by: key=machine_{{ ansible_machine }}
+- group_by:
+ key: machine_{{ ansible_machine }}
+
# Create groups like 'kvm-host'
-- group_by: key=virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }}
+- group_by:
+ key: virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }}
'''
diff --git a/network/basics/get_url.py b/network/basics/get_url.py
index 6a85b161141..a15b78df4fe 100644
--- a/network/basics/get_url.py
+++ b/network/basics/get_url.py
@@ -25,6 +25,10 @@
import re
import tempfile
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: get_url
@@ -101,7 +105,7 @@
If you worry about portability, only the sha1 algorithm is available
on all platforms and python versions. The third party hashlib
library can be installed for access to additional algorithms.
- Additionaly, if a checksum is passed to this parameter, and the file exist under
+ Additionally, if a checksum is passed to this parameter, and the file exist under
the C(dest) location, the destination_checksum would be calculated, and if
checksum equals destination_checksum, the file download would be skipped
(unless C(force) is true). '
@@ -169,20 +173,39 @@
EXAMPLES='''
- name: download foo.conf
- get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf mode=0440
+ get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ mode: 0440
- name: download file and force basic auth
- get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf force_basic_auth=yes
+ get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ force_basic_auth: yes
- name: download file with custom HTTP headers
- get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf headers='key:value,key:value'
-
-- name: download file with check
- get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf checksum=sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
- get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf checksum=md5:66dffb5228a211e61d6d7ef4a86f5758
+ get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ headers: 'key:value,key:value'
+
+- name: download file with check (sha256)
+ get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ checksum: sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
+
+- name: download file with check (md5)
+ get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ checksum: md5:66dffb5228a211e61d6d7ef4a86f5758
- name: download file from a file path
- get_url: url="file:///tmp/afile.txt" dest=/tmp/afilecopy.txt
+ get_url:
+ url: "file:///tmp/afile.txt"
+ dest: /tmp/afilecopy.txt
'''
from ansible.module_utils.six.moves.urllib.parse import urlsplit
@@ -208,10 +231,14 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, head
if info['status'] == 304:
module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''))
- # create a temporary file and copy content to do checksum-based replacement
- if info['status'] != 200 and not url.startswith('file:/'):
+ # Exceptions in fetch_url may result in a status -1, the ensures a proper error to the user in all cases
+ if info['status'] == -1:
+ module.fail_json(msg=info['msg'], url=url, dest=dest)
+
+ if info['status'] != 200 and not url.startswith('file:/') and not (url.startswith('ftp:/') and info.get('msg', '').startswith('OK')):
module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest)
+ # create a temporary file and copy content to do checksum-based replacement
if tmp_dest != '':
# tmp_dest should be an existing dir
tmp_dest_is_dir = os.path.isdir(tmp_dest)
@@ -219,7 +246,7 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, head
if os.path.exists(tmp_dest):
module.fail_json(msg="%s is a file but should be a directory." % tmp_dest)
else:
- module.fail_json(msg="%s directoy does not exist." % tmp_dest)
+ module.fail_json(msg="%s directory does not exist." % tmp_dest)
fd, tempname = tempfile.mkstemp(dir=tmp_dest)
else:
@@ -346,6 +373,11 @@ def main():
mtime = os.path.getmtime(dest)
last_mod_time = datetime.datetime.utcfromtimestamp(mtime)
+ # If the checksum does not match we have to force the download
+ # because last_mod_time may be newer than on remote
+ if checksum_mismatch:
+ force = True
+
# download to tmpsrc
tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest)
diff --git a/network/basics/slurp.py b/network/basics/slurp.py
index 991e0be05e7..f2ece413da8 100644
--- a/network/basics/slurp.py
+++ b/network/basics/slurp.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: slurp
diff --git a/network/basics/uri.py b/network/basics/uri.py
index 844761c38bd..24257dc3566 100644
--- a/network/basics/uri.py
+++ b/network/basics/uri.py
@@ -20,18 +20,9 @@
#
# see examples/playbooks/uri.yml
-import cgi
-import shutil
-import tempfile
-import datetime
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-import ansible.module_utils.six as six
-
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -66,7 +57,7 @@
body:
description:
- The body of the http request/response to the web service. If C(body_format) is set
- to 'json' it will take an already formated JSON string or convert a data structure
+ to 'json' it will take an already formatted JSON string or convert a data structure
into JSON.
required: false
default: null
@@ -166,20 +157,24 @@
'''
EXAMPLES = '''
-# Check that you can connect (GET) to a page and it returns a status 200
-- uri: url=http://www.example.com
+- name: Check that you can connect (GET) to a page and it returns a status 200
+ uri:
+ url: http://www.example.com
# Check that a page returns a status 200 and fail if the word AWESOME is not
# in the page contents.
-- action: uri url=http://www.example.com return_content=yes
+- uri:
+ url: http://www.example.com
+ return_content: yes
register: webpage
-- action: fail
+- name: Fail if AWESOME is not in the page content
+ fail:
when: "'AWESOME' not in webpage.content"
-# Create a JIRA issue
-- uri:
+- name: Create a JIRA issue
+ uri:
url: https://your.jira.example.com/rest/api/2/issue/
method: POST
user: your_username
@@ -206,8 +201,8 @@
return_content: yes
HEADER_Cookie: "{{login.set_cookie}}"
-# Queue build of a project in Jenkins:
-- uri:
+- name: Queue build of a project in Jenkins
+ uri:
url: "http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}"
method: GET
user: "{{ jenkins.user }}"
@@ -217,6 +212,23 @@
'''
+import cgi
+import datetime
+import os
+import shutil
+import tempfile
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+import ansible.module_utils.six as six
+from ansible.module_utils._text import to_text
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
def write_file(module, url, dest, content):
# create a tempfile with some test content
@@ -384,14 +396,14 @@ def main():
dict_headers = module.params['headers']
if body_format == 'json':
- # Encode the body unless its a string, then assume it is preformatted JSON
+ # Encode the body unless its a string, then assume it is pre-formatted JSON
if not isinstance(body, basestring):
body = json.dumps(body)
dict_headers['Content-Type'] = 'application/json'
# Grab all the http headers. Need this hack since passing multi-values is
# currently a bit ugly. (e.g. headers='{"Content-Type":"application/json"}')
- for key, value in module.params.iteritems():
+ for key, value in six.iteritems(module.params):
if key.startswith("HEADER_"):
skey = key.replace("HEADER_", "")
dict_headers[skey] = value
@@ -434,9 +446,11 @@ def main():
# Transmogrify the headers, replacing '-' with '_', since variables dont
# work with dashes.
+ # In python3, the headers are title cased. Lowercase them to be
+ # compatible with the python2 behaviour.
uresp = {}
- for key, value in resp.iteritems():
- ukey = key.replace("-", "_")
+ for key, value in six.iteritems(resp):
+ ukey = key.replace("-", "_").lower()
uresp[ukey] = value
try:
@@ -450,7 +464,7 @@ def main():
content_type, params = cgi.parse_header(uresp['content_type'])
if 'charset' in params:
content_encoding = params['charset']
- u_content = unicode(content, content_encoding, errors='replace')
+ u_content = to_text(content, encoding=content_encoding)
if 'application/json' in content_type or 'text/json' in content_type:
try:
js = json.loads(u_content)
@@ -458,7 +472,7 @@ def main():
except:
pass
else:
- u_content = unicode(content, content_encoding, errors='replace')
+ u_content = to_text(content, encoding=content_encoding)
if resp['status'] not in status_code:
uresp['msg'] = 'Status code was not %s: %s' % (status_code, uresp.get('msg', ''))
@@ -469,9 +483,5 @@ def main():
module.exit_json(changed=changed, **uresp)
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-
if __name__ == '__main__':
main()
diff --git a/network/cumulus/cl_bond.py b/network/cumulus/cl_bond.py
index 5221699b58d..baf9b0fe844 100644
--- a/network/cumulus/cl_bond.py
+++ b/network/cumulus/cl_bond.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: cl_bond
@@ -146,33 +150,39 @@
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
# configure a bond interface with IP address
-cl_bond: name=bond0 slaves="swp4-5" ipv4=10.1.1.1/24
-notify: reload networking
+- cl_bond:
+ name: bond0
+ slaves: "swp4-5"
+ ipv4: 10.1.1.1/24
+ notify: reload networking
# configure bond as a dual-connected clag bond
-cl_bond: name=bond1 slaves="swp1s0 swp2s0" clag_id=1
-notify: reload networking
+- cl_bond:
+ name: bond1
+ slaves: "swp1s0 swp2s0"
+ clag_id: 1
+ notify: reload networking
# define cl_bond once in tasks file
-# then write inteface config in variables file
+# then write interface config in variables file
# with just the options you want.
-cl_bond:
- name: "{{ item.key }}"
- slaves: "{{ item.value.slaves }}"
- clag_id: "{{ item.value.clag_id|default(omit) }}"
- ipv4: "{{ item.value.ipv4|default(omit) }}"
- ipv6: "{{ item.value.ipv6|default(omit) }}"
- alias_name: "{{ item.value.alias_name|default(omit) }}"
- addr_method: "{{ item.value.addr_method|default(omit) }}"
- mtu: "{{ item.value.mtu|default(omit) }}"
- vids: "{{ item.value.vids|default(omit) }}"
- virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
- virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
- mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork|default('no') }}"
- mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge|default('no') }}"
- mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard|default('no') }}"
-with_dict: cl_bonds
-notify: reload networking
+- cl_bond:
+ name: "{{ item.key }}"
+ slaves: "{{ item.value.slaves }}"
+ clag_id: "{{ item.value.clag_id|default(omit) }}"
+ ipv4: "{{ item.value.ipv4|default(omit) }}"
+ ipv6: "{{ item.value.ipv6|default(omit) }}"
+ alias_name: "{{ item.value.alias_name|default(omit) }}"
+ addr_method: "{{ item.value.addr_method|default(omit) }}"
+ mtu: "{{ item.value.mtu|default(omit) }}"
+ vids: "{{ item.value.vids|default(omit) }}"
+ virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
+ virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
+ mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork|default('no') }}"
+ mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge|default('no') }}"
+ mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard|default('no') }}"
+ with_dict: "{{ cl_bonds }}"
+ notify: reload networking
# In vars file
# ============
diff --git a/network/cumulus/cl_bridge.py b/network/cumulus/cl_bridge.py
index 44822b9453e..e2805307c28 100644
--- a/network/cumulus/cl_bridge.py
+++ b/network/cumulus/cl_bridge.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: cl_bridge
@@ -101,40 +105,47 @@
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
# configure a bridge vlan aware bridge.
-cl_bridge: name=br0 ports='swp1-12' vlan_aware='yes'
-notify: reload networking
+- cl_bridge:
+ name: br0
+ ports: 'swp1-12'
+ vlan_aware: 'yes'
+ notify: reload networking
# configure bridge interface to define a default set of vlans
-cl_bridge: name=bridge ports='swp1-12' vlan_aware='yes' vids='1-100'
-notify: reload networking
+- cl_bridge:
+ name: bridge
+ ports: 'swp1-12'
+ vlan_aware: 'yes'
+ vids: '1-100'
+ notify: reload networking
# define cl_bridge once in tasks file
-# then write inteface config in variables file
+# then write interface config in variables file
# with just the options you want.
-cl_bridge:
- name: "{{ item.key }}"
- ports: "{{ item.value.ports }}"
- vlan_aware: "{{ item.value.vlan_aware|default(omit) }}"
- ipv4: "{{ item.value.ipv4|default(omit) }}"
- ipv6: "{{ item.value.ipv6|default(omit) }}"
- alias_name: "{{ item.value.alias_name|default(omit) }}"
- addr_method: "{{ item.value.addr_method|default(omit) }}"
- mtu: "{{ item.value.mtu|default(omit) }}"
- vids: "{{ item.value.vids|default(omit) }}"
- virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
- virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
- mstpctl_treeprio: "{{ item.value.mstpctl_treeprio|default(omit) }}"
-with_dict: cl_bridges
-notify: reload networking
+- cl_bridge:
+ name: "{{ item.key }}"
+ ports: "{{ item.value.ports }}"
+ vlan_aware: "{{ item.value.vlan_aware|default(omit) }}"
+ ipv4: "{{ item.value.ipv4|default(omit) }}"
+ ipv6: "{{ item.value.ipv6|default(omit) }}"
+ alias_name: "{{ item.value.alias_name|default(omit) }}"
+ addr_method: "{{ item.value.addr_method|default(omit) }}"
+ mtu: "{{ item.value.mtu|default(omit) }}"
+ vids: "{{ item.value.vids|default(omit) }}"
+ virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
+ virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
+ mstpctl_treeprio: "{{ item.value.mstpctl_treeprio|default(omit) }}"
+ with_dict: "{{ cl_bridges }}"
+ notify: reload networking
# In vars file
# ============
cl_bridge:
- br0:
- alias_name: 'vlan aware bridge'
- ports: ['swp1', 'swp3']
- vlan_aware: true
- vids: ['1-100']
+ br0:
+ alias_name: 'vlan aware bridge'
+ ports: ['swp1', 'swp3']
+ vlan_aware: true
+ vids: ['1-100']
'''
RETURN = '''
diff --git a/network/cumulus/cl_img_install.py b/network/cumulus/cl_img_install.py
index 00e4f9034c2..26fe8857b6f 100644
--- a/network/cumulus/cl_img_install.py
+++ b/network/cumulus/cl_img_install.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: cl_img_install
@@ -59,32 +63,40 @@
## Download and install the image from a webserver.
- - name: install image using using http url. Switch slots so the subsequent
- will load the new version
- cl_img_install: version=2.0.1
- src='http://10.1.1.1/CumulusLinux-2.0.1.bin'
- switch_slot=yes
+- name: Install image using using http url. Switch slots so the subsequent will load the new version
+ cl_img_install:
+ version: 2.0.1
+ src: http://10.1.1.1/CumulusLinux-2.0.1.bin
+ switch_slot: yes
## Copy the software from the ansible server to the switch.
## The module will get the code version from the filename
## The code will be installed in the alternate slot but the slot will not be primary
## A subsequent reload will not run the new code
- - name: download cumulus linux to local system
- get_url: src=ftp://cumuluslinux.bin dest=/root/CumulusLinux-2.0.1.bin
+- name: Download cumulus linux to local system
+ get_url:
+ src: ftp://cumuluslinux.bin
+ dest: /root/CumulusLinux-2.0.1.bin
- - name: install image from local filesystem. Get version from the filename
- cl_img_install: src='/root/CumulusLinux-2.0.1.bin'
+- name: Install image from local filesystem. Get version from the filename.
+ cl_img_install:
+ src: /root/CumulusLinux-2.0.1.bin
## If the image name has been changed from the original name, use the `version` option
## to inform the module exactly what code version is been installed
- - name: download cumulus linux to local system
- get_url: src=ftp://CumulusLinux-2.0.1.bin dest=/root/image.bin
+- name: Download cumulus linux to local system
+ get_url:
+ src: ftp://CumulusLinux-2.0.1.bin
+ dest: /root/image.bin
- - name: install image and switch slots. only reboot needed
- cl_img_install: version=2.0.1 src=/root/image.bin switch_slot=yes'
+- name: install image and switch slots. only reboot needed
+ cl_img_install:
+ version: 2.0.1
+ src: /root/image.bin
+ switch_slot: yes
'''
RETURN = '''
diff --git a/network/cumulus/cl_interface.py b/network/cumulus/cl_interface.py
index f5b84c127a9..c8262b0710e 100644
--- a/network/cumulus/cl_interface.py
+++ b/network/cumulus/cl_interface.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: cl_interface
@@ -114,45 +118,55 @@
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
-# configure a front panel port with an IP
-cl_interface: name=swp1 ipv4=10.1.1.1/24
-notify: reload networking
-
-# configure front panel to use DHCP
-cl_interface: name=swp2 addr_family=dhcp
-notify: reload networking
-
-# configure a SVI for vlan 100 interface with an IP
-cl_interface: name=bridge.100 ipv4=10.1.1.1/24
-notify: reload networking
-
-# configure subinterface with an IP
-cl_interface: name=bond0.100 alias_name='my bond' ipv4=10.1.1.1/24
-notify: reload networking
+- name: Configure a front panel port with an IP
+ cl_interface:
+ name: swp1
+ ipv4: 10.1.1.1/24
+ notify: reload networking
+
+- name: Configure front panel to use DHCP
+ cl_interface:
+ name: swp2
+ addr_family: dhcp
+ notify: reload networking
+
+- name: Configure a SVI for vlan 100 interface with an IP
+ cl_interface:
+ name: bridge.100
+ ipv4: 10.1.1.1/24
+ notify: reload networking
+
+- name: Configure subinterface with an IP
+ cl_interface:
+ name: bond0.100
+ alias_name: 'my bond'
+ ipv4: 10.1.1.1/24
+ notify: reload networking
# define cl_interfaces once in tasks
-# then write intefaces in variables file
+# then write interfaces in variables file
# with just the options you want.
-cl_interface:
- name: "{{ item.key }}"
- ipv4: "{{ item.value.ipv4|default(omit) }}"
- ipv6: "{{ item.value.ipv6|default(omit) }}"
- alias_name: "{{ item.value.alias_name|default(omit) }}"
- addr_method: "{{ item.value.addr_method|default(omit) }}"
- speed: "{{ item.value.link_speed|default(omit) }}"
- mtu: "{{ item.value.mtu|default(omit) }}"
- clagd_enable: "{{ item.value.clagd_enable|default(omit) }}"
- clagd_peer_ip: "{{ item.value.clagd_peer_ip|default(omit) }}"
- clagd_sys_mac: "{{ item.value.clagd_sys_mac|default(omit) }}"
- clagd_priority: "{{ item.value.clagd_priority|default(omit) }}"
- vids: "{{ item.value.vids|default(omit) }}"
- virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
- virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
- mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork|default('no') }}"
- mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge|default('no') }}"
- mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard|default('no') }}"
-with_dict: cl_interfaces
-notify: reload networking
+ - name: Create interfaces
+ cl_interface:
+ name: "{{ item.key }}"
+ ipv4: "{{ item.value.ipv4 | default(omit) }}"
+ ipv6: "{{ item.value.ipv6 | default(omit) }}"
+ alias_name: "{{ item.value.alias_name | default(omit) }}"
+ addr_method: "{{ item.value.addr_method | default(omit) }}"
+ speed: "{{ item.value.link_speed | default(omit) }}"
+ mtu: "{{ item.value.mtu | default(omit) }}"
+ clagd_enable: "{{ item.value.clagd_enable | default(omit) }}"
+ clagd_peer_ip: "{{ item.value.clagd_peer_ip | default(omit) }}"
+ clagd_sys_mac: "{{ item.value.clagd_sys_mac | default(omit) }}"
+ clagd_priority: "{{ item.value.clagd_priority | default(omit) }}"
+ vids: "{{ item.value.vids | default(omit) }}"
+ virtual_ip: "{{ item.value.virtual_ip | default(omit) }}"
+ virtual_mac: "{{ item.value.virtual_mac | default(omit) }}"
+ mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork | default('no') }}"
+ mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge | default('no') }}"
+ mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard | default('no') }}"
+ with_dict: "{{ cl_interfaces }}"
+ notify: reload networking
# In vars file
diff --git a/network/cumulus/cl_interface_policy.py b/network/cumulus/cl_interface_policy.py
index ae99cb1bf02..597f11a3fc3 100644
--- a/network/cumulus/cl_interface_policy.py
+++ b/network/cumulus/cl_interface_policy.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: cl_interface_policy
@@ -73,7 +77,7 @@ def read_current_int_dir(module):
module.custom_currentportlist = os.listdir(module.params.get('location'))
-# take the allowed list and conver it to into a list
+# take the allowed list and convert it to into a list
# of ports.
def convert_allowed_list_to_port_range(module):
allowedlist = module.params.get('allowed')
diff --git a/network/cumulus/cl_license.py b/network/cumulus/cl_license.py
index 7690d8ac1ce..a0656e0abce 100644
--- a/network/cumulus/cl_license.py
+++ b/network/cumulus/cl_license.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: cl_license
@@ -55,33 +59,37 @@
'''
EXAMPLES = '''
-Example playbook using the cl_license module to manage licenses on Cumulus Linux
-
----
- - hosts: all
- tasks:
- - name: install license using http url
- cl_license: src='http://10.1.1.1/license.txt'
- notify: restart switchd
-
- - name: Triggers switchd to be restarted right away, before play, or role
- is over. This is desired behaviour
- meta: flush_handlers
-
- - name: configure interfaces
- template: src=interfaces.j2 dest=/etc/network/interfaces
- notify: restart networking
-
- handlers:
- - name: restart switchd
- service: name=switchd state=restarted
- - name: restart networking
- service: name=networking state=reloaded
-
-----
+# Example playbook using the cl_license module to manage licenses on Cumulus Linux
+
+- hosts: all
+ tasks:
+ - name: install license using http url
+ cl_license:
+ src: http://10.1.1.1/license.txt
+ notify: restart switchd
+
+ - name: Triggers switchd to be restarted right away, before play, or role
+ is over. This is desired behaviour
+ meta: flush_handlers
+
+ - name: Configure interfaces
+ template:
+ src: interfaces.j2
+ dest: /etc/network/interfaces
+ notify: restart networking
+
+ handlers:
+ - name: restart switchd
+ service:
+ name: switchd
+ state: restarted
+ - name: restart networking
+ service:
+ name: networking
+ state: reloaded
# Force all switches to accept a new license. Typically not needed
-ansible -m cl_license -a "src='http://10.1.1.1/new_lic' force=yes" -u root all
+ansible -m cl_license -a "src='http://10.1.1.1/new_lic' force=yes" -u root all
----
diff --git a/network/cumulus/cl_ports.py b/network/cumulus/cl_ports.py
index 02728203400..85b3ed94d02 100644
--- a/network/cumulus/cl_ports.py
+++ b/network/cumulus/cl_ports.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: cl_ports
@@ -48,7 +52,9 @@
## Unganged port config using simple args
- name: configure ports.conf setup
- cl_ports: speed_4_by_10g="swp1, swp32" speed_40g="swp2-31"
+ cl_ports:
+ speed_4_by_10g: "swp1, swp32"
+ speed_40g: "swp2-31"
notify: restart switchd
## Unganged port configuration on certain ports using complex args
diff --git a/__init__.py b/network/dellos10/__init__.py
similarity index 100%
rename from __init__.py
rename to network/dellos10/__init__.py
diff --git a/network/dellos10/dellos10_command.py b/network/dellos10/dellos10_command.py
new file mode 100644
index 00000000000..77e381c9117
--- /dev/null
+++ b/network/dellos10/dellos10_command.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+#
+# (c) 2015 Peter Sprygada,
+#
+# Copyright (c) 2016 Dell Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: dellos10_command
+version_added: "2.2"
+author: "Senthil Kumar Ganesan (@skg-net)"
+short_description: Run commands on remote devices running Dell OS10
+description:
+ - Sends arbitrary commands to a Dell OS10 node and returns the results
+ read from the device. This module includes an
+ argument that will cause the module to wait for a specific condition
+ before returning or timing out if the condition is not met.
+ - This module does not support running commands in configuration mode.
+ Please use M(dellos10_config) to configure Dell OS10 devices.
+extends_documentation_fragment: dellos10
+options:
+ commands:
+ description:
+ - List of commands to send to the remote dellos10 device over the
+ configured provider. The resulting output from the command
+ is returned. If the I(wait_for) argument is provided, the
+ module is not returned until the condition is satisfied or
+ the number of retries has expired.
+ required: true
+ wait_for:
+ description:
+ - List of conditions to evaluate against the output of the
+ command. The task will wait for each condition to be true
+ before moving forward. If the conditional is not true
+ within the configured number of I(retries), the task fails.
+ See examples.
+ required: false
+ default: null
+ retries:
+ description:
+ - Specifies the number of retries a command should by tried
+ before it is considered failed. The command is run on the
+ target device every retry and evaluated against the
+ I(wait_for) conditions.
+ required: false
+ default: 10
+ interval:
+ description:
+ - Configures the interval in seconds to wait between retries
+ of the command. If the command does not pass the specified
+ conditions, the interval indicates how long to wait before
+ trying the command again.
+ required: false
+ default: 1
+"""
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: admin
+ password: admin
+ transport: cli
+
+tasks:
+ - name: run show version on remote devices
+ dellos10_command:
+ commands: show version
+ provider: "{{ cli }}"
+
+ - name: run show version and check to see if output contains OS10
+ dellos10_command:
+ commands: show version
+ wait_for: result[0] contains OS10
+ provider: "{{ cli }}"
+
+ - name: run multiple commands on remote nodes
+ dellos10_command:
+ commands:
+ - show version
+ - show interface
+ provider: "{{ cli }}"
+
+ - name: run multiple commands and evaluate the output
+ dellos10_command:
+ commands:
+ - show version
+ - show interface
+ wait_for:
+ - result[0] contains OS10
+ - result[1] contains Ethernet
+ provider: "{{ cli }}"
+"""
+
+RETURN = """
+stdout:
+ description: The set of responses from the commands
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+stdout_lines:
+ description: The value of stdout split into a list
+ returned: always
+ type: list
+ sample: [['...', '...'], ['...'], ['...']]
+
+failed_conditions:
+ description: The list of conditionals that have failed
+ returned: failed
+ type: list
+ sample: ['...', '...']
+
+warnings:
+ description: The list of warnings (if any) generated by module based on arguments
+ returned: always
+ type: list
+ sample: ['...', '...']
+"""
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcli import CommandRunner, FailedConditionsError
+from ansible.module_utils.network import NetworkModule, NetworkError
+import ansible.module_utils.dellos10
+
+def to_lines(stdout):
+ for item in stdout:
+ if isinstance(item, basestring):
+ item = str(item).split('\n')
+ yield item
+
+def main():
+ spec = dict(
+ commands=dict(type='list', required=True),
+ wait_for=dict(type='list'),
+ retries=dict(default=10, type='int'),
+ interval=dict(default=1, type='int')
+ )
+
+ module = NetworkModule(argument_spec=spec,
+ connect_on_load=False,
+ supports_check_mode=True)
+
+ commands = module.params['commands']
+ conditionals = module.params['wait_for'] or list()
+
+ warnings = list()
+
+ runner = CommandRunner(module)
+
+ for cmd in commands:
+ if module.check_mode and not cmd.startswith('show'):
+ warnings.append('only show commands are supported when using '
+ 'check mode, not executing `%s`' % cmd)
+ else:
+ if cmd.startswith('conf'):
+ module.fail_json(msg='dellos10_command does not support running '
+ 'config mode commands. Please use '
+ 'dellos10_config instead')
+ runner.add_command(cmd)
+
+ for item in conditionals:
+ runner.add_conditional(item)
+
+ runner.retries = module.params['retries']
+ runner.interval = module.params['interval']
+
+ try:
+ runner.run()
+ except FailedConditionsError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc))
+
+ result = dict(changed=False)
+
+ result['stdout'] = list()
+ for cmd in commands:
+ try:
+ output = runner.get_command(cmd)
+ except ValueError:
+ output = 'command not executed due to check_mode, see warnings'
+ result['stdout'].append(output)
+
+
+ result['warnings'] = warnings
+ result['stdout_lines'] = list(to_lines(result['stdout']))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/dellos10/dellos10_config.py b/network/dellos10/dellos10_config.py
new file mode 100644
index 00000000000..469fa4ca0b3
--- /dev/null
+++ b/network/dellos10/dellos10_config.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python
+#
+# (c) 2015 Peter Sprygada,
+#
+# Copyright (c) 2016 Dell Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: dellos10_config
+version_added: "2.2"
+author: "Senthil Kumar Ganesan (@skg-net)"
+short_description: Manage Dell OS10 configuration sections
+description:
+ - Dell OS10 configurations use a simple block indent file syntax
+ for segmenting configuration into sections. This module provides
+ an implementation for working with Dell OS10 configuration sections in
+ a deterministic way.
+extends_documentation_fragment: dellos10
+options:
+ lines:
+ description:
+ - The ordered set of commands that should be configured in the
+ section. The commands must be the exact same commands as found
+ in the device running-config. Be sure to note the configuration
+ command syntax as some commands are automatically modified by the
+ device config parser. This argument is mutually exclusive with I(src).
+ required: false
+ default: null
+ aliases: ['commands']
+ parents:
+ description:
+ - The ordered set of parents that uniquely identify the section
+ the commands should be checked against. If the parents argument
+ is omitted, the commands are checked against the set of top
+ level or global commands.
+ required: false
+ default: null
+ src:
+ description:
+ - Specifies the source path to the file that contains the configuration
+ or configuration template to load. The path to the source file can
+ either be the full path on the Ansible control host or a relative
+ path from the playbook or role root dir. This argument is mutually
+ exclusive with I(lines).
+ required: false
+ default: null
+ before:
+ description:
+ - The ordered set of commands to push on to the command stack if
+ a change needs to be made. This allows the playbook designer
+ the opportunity to perform configuration commands prior to pushing
+ any changes without affecting how the set of commands are matched
+ against the system.
+ required: false
+ default: null
+ after:
+ description:
+ - The ordered set of commands to append to the end of the command
+ stack if a change needs to be made. Just like with I(before) this
+ allows the playbook designer to append a set of commands to be
+ executed after the command set.
+ required: false
+ default: null
+ match:
+ description:
+ - Instructs the module on the way to perform the matching of
+ the set of commands against the current device config. If
+ match is set to I(line), commands are matched line by line. If
+ match is set to I(strict), command lines are matched with respect
+ to position. If match is set to I(exact), command lines
+ must be an equal match. Finally, if match is set to I(none), the
+ module will not attempt to compare the source configuration with
+ the running configuration on the remote device.
+ required: false
+ default: line
+ choices: ['line', 'strict', 'exact', 'none']
+ replace:
+ description:
+ - Instructs the module on the way to perform the configuration
+ on the device. If the replace argument is set to I(line) then
+ the modified lines are pushed to the device in configuration
+ mode. If the replace argument is set to I(block) then the entire
+ command block is pushed to the device in configuration mode if any
+ line is not correct.
+ required: false
+ default: line
+ choices: ['line', 'block']
+ update:
+ description:
+ - The I(update) argument controls how the configuration statements
+ are processed on the remote device. Valid choices for the I(update)
+ argument are I(merge) and I(check). When the argument is set to
+ I(merge), the configuration changes are merged with the current
+ device running configuration. When the argument is set to I(check)
+ the configuration updates are determined but not actually configured
+ on the remote device.
+ required: false
+ default: merge
+ choices: ['merge', 'check']
+ save:
+ description:
+ - The C(save) argument instructs the module to save the running-
+ config to the startup-config at the conclusion of the module
+ running. If check mode is specified, this argument is ignored.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+ config:
+ description:
+ - The C(config) argument allows the playbook designer to supply
+ the base configuration to be used to validate configuration
+ changes necessary. If this argument is provided, the module
+ will not download the running-config from the remote node.
+ required: false
+ default: null
+ backup:
+ description:
+ - This argument will cause the module to create a full backup of
+ the current C(running-config) from the remote device before any
+ changes are made. The backup file is written to the C(backup)
+ folder in the playbook root directory. If the directory does not
+ exist, it is created.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+"""
+
+EXAMPLES = """
+- dellos10_config:
+ lines: ['hostname {{ inventory_hostname }}']
+ provider: "{{ cli }}"
+
+- dellos10_config:
+ lines:
+ - 10 permit ip host 1.1.1.1 any log
+ - 20 permit ip host 2.2.2.2 any log
+ - 30 permit ip host 3.3.3.3 any log
+ - 40 permit ip host 4.4.4.4 any log
+ - 50 permit ip host 5.5.5.5 any log
+ parents: ['ip access-list test']
+ before: ['no ip access-list test']
+ match: exact
+ provider: "{{ cli }}"
+
+- dellos10_config:
+ lines:
+ - 10 permit ip host 1.1.1.1 any log
+ - 20 permit ip host 2.2.2.2 any log
+ - 30 permit ip host 3.3.3.3 any log
+ - 40 permit ip host 4.4.4.4 any log
+ parents: ['ip access-list test']
+ before: ['no ip access-list test']
+ replace: block
+ provider: "{{ cli }}"
+
+"""
+
+RETURN = """
+updates:
+ description: The set of commands that will be pushed to the remote device
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+responses:
+ description: The set of responses from issuing the commands on the device
+ returned: when not check_mode
+ type: list
+ sample: ['...', '...']
+
+saved:
+ description: Returns whether the configuration is saved to the startup
+ configuration or not.
+ returned: when not check_mode
+ type: bool
+ sample: True
+
+"""
+from ansible.module_utils.netcfg import NetworkConfig, dumps
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.dellos10 import get_config, get_sublevel_config
+
+def get_candidate(module):
+ candidate = NetworkConfig(indent=1)
+ if module.params['src']:
+ candidate.load(module.params['src'])
+ elif module.params['lines']:
+ parents = module.params['parents'] or list()
+ candidate.add(module.params['lines'], parents=parents)
+ return candidate
+
+
+def main():
+
+ argument_spec = dict(
+ lines=dict(aliases=['commands'], type='list'),
+ parents=dict(type='list'),
+
+ src=dict(type='path'),
+
+ before=dict(type='list'),
+ after=dict(type='list'),
+
+ match=dict(default='line',
+ choices=['line', 'strict', 'exact', 'none']),
+ replace=dict(default='line', choices=['line', 'block']),
+ update=dict(choices=['merge', 'check'], default='merge'),
+ save=dict(type='bool', default=False),
+ config=dict(),
+ backup=dict(type='bool', default=False)
+ )
+
+ mutually_exclusive = [('lines', 'src')]
+
+ module = NetworkModule(argument_spec=argument_spec,
+ connect_on_load=False,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True)
+
+ parents = module.params['parents'] or list()
+
+ match = module.params['match']
+ replace = module.params['replace']
+ result = dict(changed=False, saved=False)
+
+ candidate = get_candidate(module)
+
+ if match != 'none':
+ config = get_config(module)
+ if parents:
+ contents = get_sublevel_config(config, module)
+ config = NetworkConfig(contents=contents, indent=1)
+ configobjs = candidate.difference(config, match=match, replace=replace)
+
+ else:
+ configobjs = candidate.items
+
+ if module.params['backup']:
+ result['__backup__'] = module.cli('show running-config')[0]
+
+ commands = list()
+ if configobjs:
+ commands = dumps(configobjs, 'commands')
+ commands = commands.split('\n')
+
+ if module.params['before']:
+ commands[:0] = module.params['before']
+
+ if module.params['after']:
+ commands.extend(module.params['after'])
+
+ if not module.check_mode and module.params['update'] == 'merge':
+ response = module.config.load_config(commands)
+ result['responses'] = response
+
+ if module.params['save']:
+ module.config.save_config()
+ result['saved'] = True
+
+ result['changed'] = True
+
+ result['updates'] = commands
+
+ module.exit_json(**result)
+
+if __name__ == '__main__':
+ main()
diff --git a/network/dellos10/dellos10_facts.py b/network/dellos10/dellos10_facts.py
new file mode 100644
index 00000000000..e73785a0e15
--- /dev/null
+++ b/network/dellos10/dellos10_facts.py
@@ -0,0 +1,454 @@
+#!/usr/bin/python
+#
+# (c) 2015 Peter Sprygada,
+#
+# Copyright (c) 2016 Dell Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: dellos10_facts
+version_added: "2.2"
+author: "Senthil Kumar Ganesan (@skg-net)"
+short_description: Collect facts from remote devices running Dell OS10
+description:
+ - Collects a base set of device facts from a remote device that
+ is running Dell OS10. This module prepends all of the
+ base network fact keys with C(ansible_net_). The facts
+ module will always collect a base set of facts from the device
+ and can enable or disable collection of additional facts.
+extends_documentation_fragment: dellos10
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will restrict the facts collected
+ to a given subset. Possible values for this argument include
+ all, hardware, config, and interfaces. Can specify a list of
+ values to include a larger subset. Values can also be used
+        with an initial C(!) to specify that a specific subset should
+ not be collected.
+ required: false
+ default: '!config'
+"""
+
+EXAMPLES = """
+# Collect all facts from the device
+- dellos10_facts:
+ gather_subset: all
+
+# Collect only the config and default facts
+- dellos10_facts:
+ gather_subset:
+ - config
+
+# Do not collect hardware facts
+- dellos10_facts:
+ gather_subset:
+ - "!hardware"
+"""
+
+RETURN = """
+ansible_net_gather_subset:
+ description: The list of fact subsets collected from the device
+ returned: always
+ type: list
+
+# default
+ansible_net_name:
+ description: The name of the OS which is running
+ returned: always
+ type: str
+ansible_net_version:
+ description: The operating system version running on the remote device
+ returned: always
+ type: str
+ansible_net_servicetag:
+ description: The service tag number of the remote device
+ returned: always
+ type: str
+ansible_net_model:
+ description: The model name returned from the device
+ returned: always
+ type: str
+ansible_net_hostname:
+ description: The configured hostname of the device
+ returned: always
+ type: str
+
+# hardware
+ansible_net_cpu_arch:
+ description: Cpu Architecture of the remote device
+ returned: when hardware is configured
+ type: str
+ansible_net_memfree_mb:
+ description: The available free memory on the remote device in Mb
+ returned: when hardware is configured
+ type: int
+ansible_net_memtotal_mb:
+ description: The total memory on the remote device in Mb
+ returned: when hardware is configured
+ type: int
+
+# config
+ansible_net_config:
+ description: The current active config from the device
+ returned: when config is configured
+ type: str
+
+# interfaces
+ansible_net_all_ipv4_addresses:
+ description: All IPv4 addresses configured on the device
+ returned: when interfaces is configured
+ type: list
+ansible_net_all_ipv6_addresses:
+ description: All IPv6 addresses configured on the device
+ returned: when interfaces is configured
+ type: list
+ansible_net_interfaces:
+ description: A hash of all interfaces running on the system
+ returned: when interfaces is configured
+ type: dict
+ansible_net_neighbors:
+ description: The list of LLDP neighbors from the remote device
+ returned: when interfaces is configured
+ type: dict
+"""
+
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcli import CommandRunner
+from ansible.module_utils.network import NetworkModule
+import ansible.module_utils.dellos10
+
+try:
+ from lxml import etree as ET
+except ImportError:
+ import xml.etree.ElementTree as ET
+
class FactsBase(object):
    """Base class for fact collectors.

    Subclasses implement ``commands()`` to register the CLI commands they
    need on the shared runner, and ``populate()`` to parse the collected
    output into ``self.facts``.
    """

    def __init__(self, runner):
        # Hold the shared CommandRunner and start with an empty fact dict;
        # registering commands happens immediately so the runner can batch
        # every subset's commands before populate() is called.
        self.facts = {}
        self.runner = runner
        self.commands()
+
+
class Default(FactsBase):
    """Collect the always-gathered facts: OS name and version, service
    tag, model and hostname."""

    def commands(self):
        self.runner.add_command('show version | display-xml')
        self.runner.add_command('show system | display-xml')
        self.runner.add_command('show running-configuration | grep hostname')

    def populate(self):
        data = self.runner.get_command('show version | display-xml')
        xml_data = ET.fromstring(data)

        self.facts['name'] = self.parse_name(xml_data)
        self.facts['version'] = self.parse_version(xml_data)

        data = self.runner.get_command('show system | display-xml')
        xml_data = ET.fromstring(data)

        self.facts['servicetag'] = self.parse_serialnum(xml_data)
        self.facts['model'] = self.parse_model(xml_data)

        data = self.runner.get_command('show running-configuration | grep hostname')
        self.facts['hostname'] = self.parse_hostname(data)

    def _find_text(self, data, path):
        # Shared helper: return the text of the first element matching
        # *path*, or an empty string when the element is absent.  This
        # replaces four copies of the same find-or-empty pattern.
        elem = data.find(path)
        if elem is not None:
            return elem.text
        return ""

    def parse_name(self, data):
        """Return the OS name from the `show version` XML tree."""
        return self._find_text(data, './data/system-sw-state/sw-version/sw-name')

    def parse_version(self, data):
        """Return the OS version from the `show version` XML tree."""
        return self._find_text(data, './data/system-sw-state/sw-version/sw-version')

    def parse_hostname(self, data):
        """Return the configured hostname from plain-text output.

        Unlike the XML parsers this returns None (not "") when no
        hostname line is present, matching the original behaviour.
        """
        match = re.search(r'hostname\s+(\S+)', data, re.M)
        if match:
            return match.group(1)

    def parse_model(self, data):
        """Return the product name from the `show system` XML tree."""
        return self._find_text(data, './data/system/node/mfg-info/product-name')

    def parse_serialnum(self, data):
        """Return the service tag from the `show system` XML tree."""
        return self._find_text(data, './data/system/node/unit/mfg-info/service-tag')
+
+
class Hardware(FactsBase):
    """Collect CPU architecture and memory facts.

    NOTE(review): populate() reads 'show version | display-xml', a command
    this class never registers; it appears to rely on the Default subset
    (which main() always runs) having added it to the shared runner —
    confirm before reusing this class standalone.
    """

    def commands(self):
        self.runner.add_command('show processes memory | grep Total')

    def populate(self):
        data = self.runner.get_command('show version | display-xml')
        xml_data = ET.fromstring(data)

        self.facts['cpu_arch'] = self.parse_cpu_arch(xml_data)

        data = self.runner.get_command('show processes memory | grep Total')

        match = self.parse_memory(data)
        if match:
            # Floor division keeps the documented int type on both
            # Python 2 and Python 3; the original '/' returns a float
            # on Python 3.
            self.facts['memtotal_mb'] = int(match[0]) // 1024
            self.facts['memfree_mb'] = int(match[2]) // 1024

    def parse_cpu_arch(self, data):
        """Return the CPU architecture from the `show version` XML tree."""
        cpu_arch = data.find('./data/system-sw-state/sw-version/cpu-arch')
        if cpu_arch is not None:
            return cpu_arch.text
        return ""

    def parse_memory(self, data):
        # Collect every integer that follows a colon, e.g.
        # 'Total: 2048 Used: 1024 Free: 1024' -> ['2048', '1024', '1024'].
        return re.findall(r'\:\s*(\d+)', data, re.M)
+
+
class Config(FactsBase):
    """Collect the device running configuration as a single string."""

    def commands(self):
        self.runner.add_command('show running-config')

    def populate(self):
        # Store the raw command output verbatim under the 'config' fact.
        self.facts['config'] = self.runner.get_command('show running-config')
+
+
class Interfaces(FactsBase):
    """Collect per-interface facts, IP addresses and LLDP neighbors."""

    def commands(self):
        self.runner.add_command('show interface | display-xml')

    def populate(self):
        self.facts['all_ipv4_addresses'] = list()
        self.facts['all_ipv6_addresses'] = list()

        data = self.runner.get_command('show interface | display-xml')

        xml_data = ET.fromstring(data)

        self.facts['interfaces'] = self.populate_interfaces(xml_data)
        self.facts['neighbors'] = self.populate_neighbors(xml_data)

    def populate_interfaces(self, interfaces):
        """Build the per-interface fact dict from the parsed XML tree."""
        int_facts = dict()

        # Configuration data: one dict entry per configured interface.
        for interface in interfaces.findall('./data/interfaces/interface'):
            intf = dict()
            name = self.parse_item(interface, 'name')

            intf['description'] = self.parse_item(interface, 'description')
            intf['duplex'] = self.parse_item(interface, 'duplex')
            intf['primary_ipv4'] = self.parse_primary_ipv4(interface)
            intf['secondary_ipv4'] = self.parse_secondary_ipv4(interface)
            intf['ipv6'] = self.parse_ipv6_address(interface)
            intf['mtu'] = self.parse_item(interface, 'mtu')
            intf['type'] = self.parse_item(interface, 'type')

            int_facts[name] = intf

        # Operational state: merged into the entries created above.
        for interface in interfaces.findall('./data/interfaces-state/interface'):
            name = self.parse_item(interface, 'name')
            intf = int_facts[name]
            intf['bandwidth'] = self.parse_item(interface, 'speed')
            intf['adminstatus'] = self.parse_item(interface, 'admin-status')
            intf['operstatus'] = self.parse_item(interface, 'oper-status')
            intf['macaddress'] = self.parse_item(interface, 'phys-address')

        # Port/media data: map physical ports back onto ethernet interfaces.
        for interface in interfaces.findall('./data/ports/ports-state/port'):
            name = self.parse_item(interface, 'name')
            fanout = self.parse_item(interface, 'fanout-state')
            mediatype = self.parse_item(interface, 'media-type')

            # Port names look like '<type>-eth<slot/port>'; only the part
            # after '-eth' is needed to rebuild the interface name.
            _, sname = name.split('-eth')

            if fanout == "BREAKOUT_1x1":
                name = "ethernet" + sname
                intf = int_facts[name]
                intf['mediatype'] = mediatype
            else:
                # TODO: Loop for the exact subport
                # range() replaces the original xrange, which is a
                # NameError on Python 3.
                for subport in range(1, 5):
                    name = "ethernet" + sname + ":" + str(subport)
                    intf = int_facts[name]
                    intf['mediatype'] = mediatype

        return int_facts

    def add_ip_address(self, address, family):
        # Accumulate addresses into the module-wide fact lists.
        if family == 'ipv4':
            self.facts['all_ipv4_addresses'].append(address)
        else:
            self.facts['all_ipv6_addresses'].append(address)

    def parse_item(self, interface, item):
        """Return the text of child element *item*, or '' when absent."""
        elem = interface.find(item)
        if elem is not None:
            return elem.text
        return ""

    def parse_primary_ipv4(self, interface):
        """Return the primary IPv4 address ('' if none) and record it."""
        ipv4 = interface.find('ipv4')
        ip_address = ""
        if ipv4 is not None:
            prim_ipaddr = ipv4.find('./address/primary-addr')
            if prim_ipaddr is not None:
                ip_address = prim_ipaddr.text
                self.add_ip_address(ip_address, 'ipv4')

        return ip_address

    def parse_secondary_ipv4(self, interface):
        """Return the secondary IPv4 address ('' if none) and record it."""
        ipv4 = interface.find('ipv4')
        ip_address = ""
        if ipv4 is not None:
            sec_ipaddr = ipv4.find('./address/secondary-addr')
            if sec_ipaddr is not None:
                ip_address = sec_ipaddr.text
                self.add_ip_address(ip_address, 'ipv4')

        return ip_address

    def parse_ipv6_address(self, interface):
        """Return the IPv6 address ('' if none) and record it."""
        ipv6 = interface.find('ipv6')
        ip_address = ""
        if ipv6 is not None:
            ipv6_addr = ipv6.find('./address/ipv6-address')
            if ipv6_addr is not None:
                ip_address = ipv6_addr.text
                self.add_ip_address(ip_address, 'ipv6')

        return ip_address

    def populate_neighbors(self, interfaces):
        """Build the LLDP neighbor dict keyed by local interface name."""
        lldp_facts = dict()
        for interface in interfaces.findall('./data/interfaces-state/interface'):
            name = interface.find('name').text
            rem_sys_name = interface.find('./lldp-rem-neighbor-info/info/rem-system-name')
            if rem_sys_name is not None:
                lldp_facts[name] = list()
                fact = dict()
                fact['host'] = rem_sys_name.text
                rem_sys_port = interface.find('./lldp-rem-neighbor-info/info/rem-lldp-port-id')
                fact['port'] = rem_sys_port.text
                lldp_facts[name].append(fact)

        return lldp_facts
+
# Map each gather_subset keyword to its collector class; the 'default'
# subset is always collected regardless of the user's selection (see main()).
FACT_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    interfaces=Interfaces,
    config=Config,
)

# The set of subset names accepted by the gather_subset argument.
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
+
+
def main():
    """Entry point: collect the requested fact subsets and exit.

    Parses gather_subset (supporting 'all' and '!'-prefixed exclusions),
    runs every selected collector against a shared CommandRunner, and
    exits with the facts prefixed 'ansible_net_'.
    """
    spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )

    module = NetworkModule(argument_spec=spec, supports_check_mode=True)

    gather_subset = module.params['gather_subset']

    runable_subsets = set()
    exclude_subsets = set()

    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            continue

        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False

        if subset not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')

        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)

    # Nothing explicitly selected means "everything not excluded".
    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)

    runable_subsets.difference_update(exclude_subsets)
    # The default facts are always collected, even when excluded.
    runable_subsets.add('default')

    facts = dict()
    facts['gather_subset'] = list(runable_subsets)

    runner = CommandRunner(module)

    # Instantiating each collector registers its commands on the shared
    # runner; runner.run() then executes them in one batch.
    instances = list()
    for key in runable_subsets:
        instances.append(FACT_SUBSETS[key](runner))

    runner.run()

    try:
        for inst in instances:
            inst.populate()
            facts.update(inst.facts)
    except Exception:
        # Best-effort fallback: if parsing fails, return the raw command
        # output instead of failing the whole facts run.
        module.exit_json(out=module.from_json(runner.items))

    # items() works on both Python 2 and 3; the original iteritems()
    # raises AttributeError on Python 3.
    ansible_facts = dict()
    for key, value in facts.items():
        ansible_facts['ansible_net_%s' % key] = value

    module.exit_json(ansible_facts=ansible_facts)


if __name__ == '__main__':
    main()
diff --git a/network/dellos6/__init__.py b/network/dellos6/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/network/dellos6/dellos6_command.py b/network/dellos6/dellos6_command.py
new file mode 100644
index 00000000000..19efe7e083b
--- /dev/null
+++ b/network/dellos6/dellos6_command.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+#
+# (c) 2015 Peter Sprygada,
+#
+# Copyright (c) 2016 Dell Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: dellos6_command
+version_added: "2.2"
+short_description: Run commands on remote devices running Dell OS6
+description:
+ - Sends arbitrary commands to a Dell OS6 node and returns the results
+ read from the device. The M(dellos6_command) module includes an
+ argument that will cause the module to wait for a specific condition
+ before returning or timing out if the condition is not met.
+ - This module does not support running commands in configuration mode.
+ Please use M(dellos6_config) to configure Dell OS6 devices.
+extends_documentation_fragment: dellos6
+options:
+ commands:
+ description:
+ - List of commands to send to the remote dellos6 device over the
+ configured provider. The resulting output from the command
+ is returned. If the I(wait_for) argument is provided, the
+ module is not returned until the condition is satisfied or
+        the number of I(retries) has expired.
+ required: true
+ wait_for:
+ description:
+ - List of conditions to evaluate against the output of the
+ command. The task will wait for each condition to be true
+ before moving forward. If the conditional is not true
+ within the configured number of I(retries), the task fails.
+ See examples.
+ required: false
+ default: null
+ retries:
+ description:
+ - Specifies the number of retries a command should be tried
+ before it is considered failed. The command is run on the
+ target device every retry and evaluated against the
+ I(wait_for) conditions.
+ required: false
+ default: 10
+ interval:
+ description:
+ - Configures the interval in seconds to wait between retries
+ of the command. If the command does not pass the specified
+ conditions, the interval indicates how long to wait before
+ trying the command again.
+ required: false
+ default: 1
+"""
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: admin
+ password: admin
+ transport: cli
+
+tasks:
+ - name: run show version on remote devices
+ dellos6_command:
+ commands: show version
+      provider: "{{ cli }}"
+
+ - name: run show version and check to see if output contains Dell
+ dellos6_command:
+ commands: show version
+ wait_for: result[0] contains Dell
+      provider: "{{ cli }}"
+
+ - name: run multiple commands on remote nodes
+ dellos6_command:
+ commands:
+ - show version
+ - show interfaces
+      provider: "{{ cli }}"
+
+ - name: run multiple commands and evaluate the output
+ dellos6_command:
+ commands:
+ - show version
+ - show interfaces
+ wait_for:
+ - result[0] contains Dell
+ - result[1] contains Access
+      provider: "{{ cli }}"
+"""
+
+RETURN = """
+stdout:
+ description: The set of responses from the commands
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+stdout_lines:
+ description: The value of stdout split into a list
+ returned: always
+ type: list
+ sample: [['...', '...'], ['...'], ['...']]
+
+failed_conditions:
+ description: The list of conditionals that have failed
+ returned: failed
+ type: list
+ sample: ['...', '...']
+
+warnings:
+ description: The list of warnings (if any) generated by module based on arguments
+ returned: always
+ type: list
+ sample: ['...', '...']
+"""
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcli import CommandRunner, FailedConditionsError
+from ansible.module_utils.network import NetworkModule, NetworkError
+import ansible.module_utils.dellos6
+
def to_lines(stdout):
    """Yield each response, splitting string responses into line lists.

    Items that are already lists are yielded unchanged.
    """
    for item in stdout:
        # Duck-type the "is a string" check via .split: str and (Py2)
        # unicode both have it, lists do not.  The original used
        # `basestring`, which is a NameError on Python 3.
        if hasattr(item, 'split'):
            item = str(item).split('\n')
        yield item
+
+
def main():
    """Entry point: run show commands and evaluate wait_for conditionals."""
    spec = dict(
        commands=dict(type='list', required=True),
        wait_for=dict(type='list'),
        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int')
    )

    module = NetworkModule(argument_spec=spec,
                           connect_on_load=False,
                           supports_check_mode=True)

    commands = module.params['commands']
    conditionals = module.params['wait_for'] or list()
    warnings = list()

    runner = CommandRunner(module)

    for command in commands:
        # In check mode only non-mutating show commands may be queued.
        if module.check_mode and not command.startswith('show'):
            warnings.append('only show commands are supported when using '
                            'check mode, not executing `%s`' % command)
            continue
        if command.startswith('conf'):
            module.fail_json(msg='dellos6_command does not support running '
                                 'config mode commands. Please use '
                                 'dellos6_config instead')
        runner.add_command(command)

    for conditional in conditionals:
        runner.add_conditional(conditional)

    runner.retries = module.params['retries']
    runner.interval = module.params['interval']

    try:
        runner.run()
    except FailedConditionsError:
        exc = get_exception()
        module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
    except NetworkError:
        exc = get_exception()
        module.fail_json(msg=str(exc))

    result = dict(changed=False, stdout=list())

    for command in commands:
        try:
            response = runner.get_command(command)
        except ValueError:
            # The command was skipped in check mode, so no output exists.
            response = 'command not executed due to check_mode, see warnings'
        result['stdout'].append(response)

    result['warnings'] = warnings
    result['stdout_lines'] = list(to_lines(result['stdout']))

    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/network/dellos6/dellos6_config.py b/network/dellos6/dellos6_config.py
new file mode 100644
index 00000000000..e2c4743319e
--- /dev/null
+++ b/network/dellos6/dellos6_config.py
@@ -0,0 +1,285 @@
+#!/usr/bin/python
+#
+# (c) 2015 Peter Sprygada,
+#
+# Copyright (c) 2016 Dell Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: dellos6_config
+version_added: "2.2"
+author: "Abirami N(@abirami-n)"
+short_description: Manage Dell OS6 configuration sections
+description:
+ - Dell OS6 configurations use a simple block indent file syntax
+ for segmenting configuration into sections. This module provides
+ an implementation for working with Dell OS6 configuration sections in
+ a deterministic way.
+extends_documentation_fragment: dellos6
+options:
+ lines:
+ description:
+ - The ordered set of commands that should be configured in the
+ section. The commands must be the exact same commands as found
+ in the device running-config. Be sure to note the configuration
+ command syntax as some commands are automatically modified by the
+ device config parser. This argument is mutually exclusive with I(src).
+ required: false
+ default: null
+ aliases: ['commands']
+ parents:
+ description:
+ - The ordered set of parents that uniquely identify the section
+ the commands should be checked against. If the parents argument
+ is omitted, the commands are checked against the set of top
+ level or global commands.
+ required: false
+ default: null
+ src:
+ description:
+ - Specifies the source path to the file that contains the configuration
+ or configuration template to load. The path to the source file can
+ either be the full path on the Ansible control host or a relative
+ path from the playbook or role root dir. This argument is mutually
+ exclusive with I(lines).
+ required: false
+ default: null
+ before:
+ description:
+ - The ordered set of commands to push on to the command stack if
+ a change needs to be made. This allows the playbook designer
+ the opportunity to perform configuration commands prior to pushing
+ any changes without affecting how the set of commands are matched
+ against the system.
+ required: false
+ default: null
+ after:
+ description:
+ - The ordered set of commands to append to the end of the command
+ stack if a change needs to be made. Just like with I(before) this
+ allows the playbook designer to append a set of commands to be
+ executed after the command set.
+ required: false
+ default: null
+ match:
+ description:
+ - Instructs the module on the way to perform the matching of
+ the set of commands against the current device config. If
+ match is set to I(line), commands are matched line by line. If
+ match is set to I(strict), command lines are matched with respect
+ to position. If match is set to I(exact), command lines
+ must be an equal match. Finally, if match is set to I(none), the
+ module will not attempt to compare the source configuration with
+ the running configuration on the remote device.
+ required: false
+ default: line
+ choices: ['line', 'strict', 'exact', 'none']
+ replace:
+ description:
+ - Instructs the module on the way to perform the configuration
+ on the device. If the replace argument is set to I(line) then
+ the modified lines are pushed to the device in configuration
+ mode. If the replace argument is set to I(block) then the entire
+ command block is pushed to the device in configuration mode if any
+ line is not correct.
+ required: false
+ default: line
+ choices: ['line', 'block']
+ update:
+ description:
+ - The I(update) argument controls how the configuration statements
+ are processed on the remote device. Valid choices for the I(update)
+ argument are I(merge) and I(check). When the argument is set to
+ I(merge), the configuration changes are merged with the current
+ device running configuration. When the argument is set to I(check)
+ the configuration updates are determined but not actually configured
+ on the remote device.
+ required: false
+ default: merge
+ choices: ['merge', 'check']
+ save:
+ description:
+ - The C(save) argument instructs the module to save the running-
+ config to the startup-config at the conclusion of the module
+ running. If check mode is specified, this argument is ignored.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+ config:
+ description:
+ - The C(config) argument allows the playbook designer to supply
+ the base configuration to be used to validate configuration
+ changes necessary. If this argument is provided, the module
+ will not download the running-config from the remote node.
+ required: false
+ default: null
+ backup:
+ description:
+ - This argument will cause the module to create a full backup of
+ the current C(running-config) from the remote device before any
+ changes are made. The backup file is written to the C(backup)
+ folder in the playbook root directory. If the directory does not
+ exist, it is created.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+"""
+
+EXAMPLES = """
+- dellos6_config:
+ lines: ['hostname {{ inventory_hostname }}']
+ provider: "{{ cli }}"
+
+- dellos6_config:
+ lines:
+ - 10 permit ip 1.1.1.1 any log
+ - 20 permit ip 2.2.2.2 any log
+ - 30 permit ip 3.3.3.3 any log
+ - 40 permit ip 4.4.4.4 any log
+ - 50 permit ip 5.5.5.5 any log
+ parents: ['ip access-list test']
+ before: ['no ip access-list test']
+ match: exact
+ provider: "{{ cli }}"
+
+- dellos6_config:
+ lines:
+ - 10 permit ip 1.1.1.1 any log
+ - 20 permit ip 2.2.2.2 any log
+ - 30 permit ip 3.3.3.3 any log
+ - 40 permit ip 4.4.4.4 any log
+ parents: ['ip access-list test']
+ before: ['no ip access-list test']
+ replace: block
+ provider: "{{ cli }}"
+
+"""
+
+RETURN = """
+updates:
+ description: The set of commands that will be pushed to the remote device
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+responses:
+ description: The set of responses from issuing the commands on the device
+ returned: when not check_mode
+ type: list
+ sample: ['...', '...']
+
+saved:
+ description: Returns whether the configuration is saved to the startup
+ configuration or not.
+ returned: when not check_mode
+ type: bool
+ sample: True
+
+"""
+from ansible.module_utils.netcfg import dumps
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.dellos6 import get_config, get_sublevel_config, Dellos6NetworkConfig
+
+
def get_candidate(module):
    """Build the candidate configuration from either `src` or `lines`."""
    candidate = Dellos6NetworkConfig(indent=0)
    src = module.params['src']
    lines = module.params['lines']
    # `src` and `lines` are declared mutually exclusive, so at most one
    # of these branches runs.
    if src:
        candidate.load(src)
    elif lines:
        candidate.add(lines, parents=module.params['parents'] or list())
    return candidate
+
+
def main():
    """Entry point: compute config changes and optionally apply them."""
    argument_spec = dict(
        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),
        src=dict(type='path'),
        before=dict(type='list'),
        after=dict(type='list'),
        match=dict(default='line',
                   choices=['line', 'strict', 'exact', 'none']),
        replace=dict(default='line', choices=['line', 'block']),
        update=dict(choices=['merge', 'check'], default='merge'),
        save=dict(type='bool', default=False),
        config=dict(),
        backup=dict(type='bool', default=False)
    )

    mutually_exclusive = [('lines', 'src')]

    module = NetworkModule(argument_spec=argument_spec,
                           connect_on_load=False,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    params = module.params
    parents = params['parents'] or list()
    match = params['match']
    replace = params['replace']

    result = dict(changed=False, saved=False)
    candidate = get_candidate(module)

    if match == 'none':
        # No comparison requested: push every candidate line as-is.
        configobjs = candidate.items
    else:
        config = get_config(module)
        if parents:
            config = get_sublevel_config(config, module)
        configobjs = candidate.difference(config, match=match, replace=replace)

    if params['backup']:
        result['__backup__'] = module.cli('show running-config')[0]

    commands = list()
    if configobjs:
        commands = dumps(configobjs, 'commands').split('\n')

        if params['before']:
            commands[:0] = params['before']

        if params['after']:
            commands.extend(params['after'])

        if not module.check_mode and params['update'] == 'merge':
            result['responses'] = module.config.load_config(commands)

            if params['save']:
                module.config.save_config()
                result['saved'] = True

        result['changed'] = True

    result['updates'] = commands

    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/network/dellos6/dellos6_facts.py b/network/dellos6/dellos6_facts.py
new file mode 100644
index 00000000000..bfb82fbc6f5
--- /dev/null
+++ b/network/dellos6/dellos6_facts.py
@@ -0,0 +1,445 @@
+#!/usr/bin/python
+#
+# (c) 2015 Peter Sprygada,
+#
+# Copyright (c) 2016 Dell Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: dellos6_facts
+version_added: "2.2"
+author: "Abirami N (@abirami-n)"
+short_description: Collect facts from remote devices running Dell OS6
+description:
+ - Collects a base set of device facts from a remote device that
+ is running OS6. This module prepends all of the
+ base network fact keys with C(ansible_net_). The facts
+ module will always collect a base set of facts from the device
+ and can enable or disable collection of additional facts.
+extends_documentation_fragment: dellos6
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will restrict the facts collected
+ to a given subset. Possible values for this argument include
+ all, hardware, config, and interfaces. Can specify a list of
+ values to include a larger subset. Values can also be used
+ with an initial C(!) to specify that a specific subset should
+ not be collected.
+ required: false
+ default: '!config'
+"""
+
+EXAMPLES = """
+# Collect all facts from the device
+- dellos6_facts:
+ gather_subset: all
+
+# Collect only the config and default facts
+- dellos6_facts:
+ gather_subset:
+ - config
+
+# Do not collect hardware facts
+- dellos6_facts:
+ gather_subset:
+ - "!interfaces"
+"""
+
+RETURN = """
+ansible_net_gather_subset:
+ description: The list of fact subsets collected from the device
+ returned: always
+ type: list
+
+# default
+ansible_net_model:
+ description: The model name returned from the device
+ returned: always
+ type: str
+ansible_net_serialnum:
+ description: The serial number of the remote device
+ returned: always
+ type: str
+ansible_net_version:
+ description: The operating system version running on the remote device
+ returned: always
+ type: str
+ansible_net_hostname:
+ description: The configured hostname of the device
+ returned: always
+ type: str
+ansible_net_image:
+ description: The image file the device is running
+ returned: always
+ type: str
+
+# hardware
+ansible_net_memfree_mb:
+ description: The available free memory on the remote device in Mb
+ returned: when hardware is configured
+ type: int
+ansible_net_memtotal_mb:
+ description: The total memory on the remote device in Mb
+ returned: when hardware is configured
+ type: int
+
+# config
+ansible_net_config:
+ description: The current active config from the device
+ returned: when config is configured
+ type: str
+
+# interfaces
+ansible_net_interfaces:
+ description: A hash of all interfaces running on the system
+ returned: when interfaces is configured
+ type: dict
+ansible_net_neighbors:
+ description: The list of LLDP neighbors from the remote device
+ returned: when interfaces is configured
+ type: dict
+
+"""
+import re
+
+from ansible.module_utils.netcli import CommandRunner
+from ansible.module_utils.network import NetworkModule
+import ansible.module_utils.dellos6
+
+class FactsBase(object):
+
+ def __init__(self, runner):
+ self.runner = runner
+ self.facts = dict()
+
+ self.commands()
+
+class Default(FactsBase):
+
+ def commands(self):
+ self.runner.add_command('show version')
+ self.runner.add_command('show running-config | include hostname')
+
+ def populate(self):
+ data = self.runner.get_command('show version')
+ self.facts['version'] = self.parse_version(data)
+ self.facts['serialnum'] = self.parse_serialnum(data)
+ self.facts['model'] = self.parse_model(data)
+ self.facts['image'] = self.parse_image(data)
+ hdata =self.runner.get_command('show running-config | include hostname')
+ self.facts['hostname'] = self.parse_hostname(hdata)
+
+ def parse_version(self, data):
+ match = re.search(r'HW Version(.+)\s(\d+)', data)
+ if match:
+ return match.group(2)
+
+ def parse_hostname(self, data):
+ match = re.search(r'\S+\s(\S+)', data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_model(self, data):
+ match = re.search(r'System Model ID(.+)\s([A-Z0-9]*)\n', data, re.M)
+ if match:
+ return match.group(2)
+
+ def parse_image(self, data):
+ match = re.search(r'Image File(.+)\s([A-Z0-9a-z_.]*)\n', data)
+ if match:
+ return match.group(2)
+
+ def parse_serialnum(self, data):
+ match = re.search(r'Serial Number(.+)\s([A-Z0-9]*)\n', data)
+ if match:
+ return match.group(2)
+
+
+class Hardware(FactsBase):
+
+ def commands(self):
+ self.runner.add_command('show memory cpu')
+
+ def populate(self):
+
+ data = self.runner.get_command('show memory cpu')
+ match = re.findall('\s(\d+)\s', data)
+ if match:
+ self.facts['memtotal_mb'] = int(match[0]) / 1024
+ self.facts['memfree_mb'] = int(match[1]) / 1024
+
+
+class Config(FactsBase):
+
+ def commands(self):
+ self.runner.add_command('show running-config')
+
+ def populate(self):
+ self.facts['config'] = self.runner.get_command('show running-config')
+
+
+class Interfaces(FactsBase):
+ def commands(self):
+ self.runner.add_command('show interfaces')
+ self.runner.add_command('show interfaces status')
+ self.runner.add_command('show interfaces transceiver properties')
+ self.runner.add_command('show ip int')
+ self.runner.add_command('show lldp')
+ self.runner.add_command('show lldp remote-device all')
+
+ def populate(self):
+ vlan_info = dict()
+ data = self.runner.get_command('show interfaces')
+ interfaces = self.parse_interfaces(data)
+ desc = self.runner.get_command('show interfaces status')
+ properties = self.runner.get_command('show interfaces transceiver properties')
+ vlan = self.runner.get_command('show ip int')
+ vlan_info = self.parse_vlan(vlan)
+ self.facts['interfaces'] = self.populate_interfaces(interfaces,desc,properties)
+ self.facts['interfaces'].update(vlan_info)
+ if 'LLDP is not enabled' not in self.runner.get_command('show lldp'):
+ neighbors = self.runner.get_command('show lldp remote-device all')
+ self.facts['neighbors'] = self.parse_neighbors(neighbors)
+
+ def parse_vlan(self,vlan):
+ facts =dict()
+ vlan_info, vlan_info_next = vlan.split('---------- ----- --------------- --------------- -------')
+ for en in vlan_info_next.splitlines():
+ if en == '':
+ continue
+ match = re.search('^(\S+)\s+(\S+)\s+(\S+)', en)
+ intf = match.group(1)
+ if intf not in facts:
+ facts[intf] = list()
+ fact = dict()
+ matc=re.search('^([\w+\s\d]*)\s+(\S+)\s+(\S+)',en)
+ fact['address'] = matc.group(2)
+ fact['masklen'] = matc.group(3)
+ facts[intf].append(fact)
+ return facts
+
+ def populate_interfaces(self, interfaces, desc, properties):
+ facts = dict()
+ for key, value in interfaces.iteritems():
+ intf = dict()
+ intf['description'] = self.parse_description(key,desc)
+ intf['macaddress'] = self.parse_macaddress(value)
+ intf['mtu'] = self.parse_mtu(value)
+ intf['bandwidth'] = self.parse_bandwidth(value)
+ intf['mediatype'] = self.parse_mediatype(key,properties)
+ intf['duplex'] = self.parse_duplex(value)
+ intf['lineprotocol'] = self.parse_lineprotocol(value)
+ intf['operstatus'] = self.parse_operstatus(value)
+ intf['type'] = self.parse_type(key,properties)
+ facts[key] = intf
+ return facts
+
+ def parse_neighbors(self, neighbors):
+ facts = dict()
+ neighbor, neighbor_next = neighbors.split('--------- ------- ------------------- ----------------- -----------------')
+ for en in neighbor_next.splitlines():
+ if en == '':
+ continue
+ intf = self.parse_lldp_intf(en.split()[0])
+ if intf not in facts:
+ facts[intf] = list()
+ fact = dict()
+ fact['host'] = self.parse_lldp_host(en.split()[4])
+ fact['port'] = self.parse_lldp_port(en.split()[3])
+ facts[intf].append(fact)
+
+ return facts
+
+ def parse_interfaces(self, data):
+ parsed = dict()
+ for line in data.split('\n'):
+ if len(line) == 0:
+ continue
+ else:
+ match = re.match(r'Interface Name(.+)\s([A-Za-z0-9/]*)', line)
+ if match:
+ key = match.group(2)
+ parsed[key] = line
+ else:
+ parsed[key] += '\n%s' % line
+ return parsed
+
+ def parse_description(self, key, desc):
+ desc, desc_next = desc.split('--------- --------------- ------ ------- ---- ------ ----- -- -------------------')
+ desc_val, desc_info = desc_next.split('Oob')
+ for en in desc_val.splitlines():
+ if key in en:
+ match = re.search('^(\S+)\s+(\S+)', en)
+ if match.group(2) in ['Full','N/A']:
+ return "Null"
+ else:
+ return match.group(2)
+
+ def parse_macaddress(self, data):
+ match = re.search(r'Burned MAC Address(.+)\s([A-Z0-9.]*)\n', data)
+ if match:
+ return match.group(2)
+
+ def parse_mtu(self, data):
+ match = re.search(r'MTU Size(.+)\s(\d+)\n', data)
+ if match:
+ return int(match.group(2))
+
+ def parse_bandwidth(self, data):
+ match = re.search(r'Port Speed(.+)\s(\d+)\n', data)
+ if match:
+ return int(match.group(2))
+
+ def parse_duplex(self, data):
+ match = re.search(r'Port Mode\s([A-Za-z]*)(.+)\s([A-Za-z/]*)\n', data)
+ if match:
+ return match.group(3)
+
+ def parse_mediatype(self, key, properties):
+ mediatype, mediatype_next = properties.split('--------- ------- --------------------- --------------------- --------------')
+ flag=1
+ for en in mediatype_next.splitlines():
+ if key in en:
+ flag=0
+ match = re.search('^(\S+)\s+(\S+)\s+(\S+)',en)
+ if match:
+ strval = match.group(3)
+ return match.group(3)
+ if flag==1:
+ return "null"
+
+ def parse_type(self, key, properties):
+ type_val, type_val_next = properties.split('--------- ------- --------------------- --------------------- --------------')
+ flag=1
+ for en in type_val_next.splitlines():
+ if key in en:
+ flag=0
+ match = re.search('^(\S+)\s+(\S+)\s+(\S+)',en)
+ if match:
+ strval = match.group(2)
+ return match.group(2)
+ if flag==1:
+ return "null"
+
+ def parse_lineprotocol(self, data):
+ match = re.search(r'Link Status.*\s(\S+)\s+(\S+)\n', data)
+ if match:
+ strval= match.group(2)
+ return strval.strip('/')
+
+ def parse_operstatus(self, data):
+ match = re.search(r'Link Status.*\s(\S+)\s+(\S+)\n', data)
+ if match:
+ return match.group(1)
+
+ def parse_lldp_intf(self, data):
+ match = re.search(r'^([A-Za-z0-9/]*)', data)
+ if match:
+ return match.group(1)
+
+ def parse_lldp_host(self, data):
+ match = re.search(r'^([A-Za-z0-9]*)', data)
+ if match:
+ return match.group(1)
+
+ def parse_lldp_port(self, data):
+ match = re.search(r'^([A-Za-z0-9/]*)', data)
+ if match:
+ return match.group(1)
+
+
+FACT_SUBSETS = dict(
+ default=Default,
+ hardware=Hardware,
+ interfaces=Interfaces,
+ config=Config,
+)
+
+VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
+
+def main():
+ spec = dict(
+ gather_subset=dict(default=['!config'], type='list')
+ )
+ module = NetworkModule(argument_spec=spec, supports_check_mode=True)
+
+ gather_subset = module.params['gather_subset']
+
+ runable_subsets = set()
+ exclude_subsets = set()
+
+ for subset in gather_subset:
+ if subset == 'all':
+ runable_subsets.update(VALID_SUBSETS)
+ continue
+
+ if subset.startswith('!'):
+ subset = subset[1:]
+ if subset == 'all':
+ exclude_subsets.update(VALID_SUBSETS)
+ continue
+ exclude = True
+ else:
+ exclude = False
+
+ if subset not in VALID_SUBSETS:
+ module.fail_json(msg='Bad subset')
+
+ if exclude:
+ exclude_subsets.add(subset)
+ else:
+ runable_subsets.add(subset)
+
+ if not runable_subsets:
+ runable_subsets.update(VALID_SUBSETS)
+
+ runable_subsets.difference_update(exclude_subsets)
+ runable_subsets.add('default')
+
+ facts = dict()
+ facts['gather_subset'] = list(runable_subsets)
+
+ runner = CommandRunner(module)
+ instances = list()
+ for key in runable_subsets:
+ instances.append(FACT_SUBSETS[key](runner))
+ runner.run()
+
+ try:
+ for inst in instances:
+ inst.populate()
+ facts.update(inst.facts)
+ except Exception:
+ module.exit_json(out=module.from_json(runner.items))
+
+ ansible_facts = dict()
+ for key, value in facts.iteritems():
+ key = 'ansible_net_%s' % key
+ ansible_facts[key] = value
+
+ module.exit_json(ansible_facts=ansible_facts)
+
+
+if __name__ == '__main__':
+ main()
+
diff --git a/network/dellos9/__init__.py b/network/dellos9/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/network/dellos9/dellos9_command.py b/network/dellos9/dellos9_command.py
new file mode 100755
index 00000000000..fcd70f0c35e
--- /dev/null
+++ b/network/dellos9/dellos9_command.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+#
+# (c) 2015 Peter Sprygada,
+#
+# Copyright (c) 2016 Dell Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: dellos9_command
+version_added: "2.2"
+author: "Dhivya P (@dhivyap)"
+short_description: Run commands on remote devices running Dell OS9
+description:
+ - Sends arbitrary commands to a Dell OS9 node and returns the results
+ read from the device. This module includes an
+ argument that will cause the module to wait for a specific condition
+ before returning or timing out if the condition is not met.
+ - This module does not support running commands in configuration mode.
+ Please use M(dellos9_config) to configure Dell OS9 devices.
+extends_documentation_fragment: dellos9
+options:
+ commands:
+ description:
+ - List of commands to send to the remote dellos9 device over the
+ configured provider. The resulting output from the command
+ is returned. If the I(wait_for) argument is provided, the
+ module is not returned until the condition is satisfied or
+ the number of retries has expired.
+ required: true
+ wait_for:
+ description:
+ - List of conditions to evaluate against the output of the
+ command. The task will wait for each condition to be true
+ before moving forward. If the conditional is not true
+ within the configured number of I(retries), the task fails.
+ See examples.
+ required: false
+ default: null
+ retries:
+ description:
+ - Specifies the number of retries a command should be tried
+ before it is considered failed. The command is run on the
+ target device every retry and evaluated against the
+ I(wait_for) conditions.
+ required: false
+ default: 10
+ interval:
+ description:
+ - Configures the interval in seconds to wait between retries
+ of the command. If the command does not pass the specified
+ conditions, the interval indicates how long to wait before
+ trying the command again.
+ required: false
+ default: 1
+
+notes:
+ - This module requires Dell OS9 version 9.10.0.1P13 or above.
+
+ - This module requires the ssh connection rate limit to be increased.
+ Use the following command I(ip ssh connection-rate-limit 60)
+ to configure the same. This can be done via the M(dellos9_config) module
+ as well.
+
+"""
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: admin
+ password: admin
+ transport: cli
+
+tasks:
+ - name: run show version on remote devices
+ dellos9_command:
+ commands: show version
+ provider: "{{ cli }}"
+
+ - name: run show version and check to see if output contains OS9
+ dellos9_command:
+ commands: show version
+ wait_for: result[0] contains OS9
+ provider: "{{ cli }}"
+
+ - name: run multiple commands on remote nodes
+ dellos9_command:
+ commands:
+ - show version
+ - show interfaces
+ provider: "{{ cli }}"
+
+ - name: run multiple commands and evaluate the output
+ dellos9_command:
+ commands:
+ - show version
+ - show interfaces
+ wait_for:
+ - result[0] contains OS9
+ - result[1] contains Loopback
+ provider: "{{ cli }}"
+"""
+
+RETURN = """
+stdout:
+ description: The set of responses from the commands
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+stdout_lines:
+ description: The value of stdout split into a list
+ returned: always
+ type: list
+ sample: [['...', '...'], ['...'], ['...']]
+
+failed_conditions:
+ description: The list of conditionals that have failed
+ returned: failed
+ type: list
+ sample: ['...', '...']
+
+warnings:
+ description: The list of warnings (if any) generated by module based on arguments
+ returned: always
+ type: list
+ sample: ['...', '...']
+"""
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcli import CommandRunner, FailedConditionsError
+from ansible.module_utils.network import NetworkModule, NetworkError
+import ansible.module_utils.dellos9
+
+
+def to_lines(stdout):
+ for item in stdout:
+ if isinstance(item, basestring):
+ item = str(item).split('\n')
+ yield item
+
+
+def main():
+ spec = dict(
+ commands=dict(type='list', required=True),
+ wait_for=dict(type='list'),
+ retries=dict(default=10, type='int'),
+ interval=dict(default=1, type='int')
+ )
+
+ module = NetworkModule(argument_spec=spec,
+ connect_on_load=False,
+ supports_check_mode=True)
+
+ commands = module.params['commands']
+ conditionals = module.params['wait_for'] or list()
+
+ warnings = list()
+
+ runner = CommandRunner(module)
+
+ for cmd in commands:
+ if module.check_mode and not cmd.startswith('show'):
+ warnings.append('only show commands are supported when using '
+ 'check mode, not executing `%s`' % cmd)
+ else:
+ if cmd.startswith('conf'):
+ module.fail_json(msg='dellos9_command does not support running '
+ 'config mode commands. Please use '
+ 'dellos9_config instead')
+ runner.add_command(cmd)
+
+ for item in conditionals:
+ runner.add_conditional(item)
+
+ runner.retries = module.params['retries']
+ runner.interval = module.params['interval']
+
+ try:
+ runner.run()
+ except FailedConditionsError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc))
+
+ result = dict(changed=False)
+
+ result['stdout'] = list()
+ for cmd in commands:
+ try:
+ output = runner.get_command(cmd)
+ except ValueError:
+ output = 'command not executed due to check_mode, see warnings'
+ result['stdout'].append(output)
+
+ result['warnings'] = warnings
+ result['stdout_lines'] = list(to_lines(result['stdout']))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/dellos9/dellos9_config.py b/network/dellos9/dellos9_config.py
new file mode 100755
index 00000000000..bee90ee1c3f
--- /dev/null
+++ b/network/dellos9/dellos9_config.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+#
+# (c) 2015 Peter Sprygada,
+#
+# Copyright (c) 2016 Dell Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: dellos9_config
+version_added: "2.2"
+author: "Dhivya P (@dhivyap)"
+short_description: Manage Dell OS9 configuration sections
+description:
+ - Dell OS9 configurations use a simple block indent file syntax
+ for segmenting configuration into sections. This module provides
+ an implementation for working with Dell OS9 configuration sections in
+ a deterministic way.
+extends_documentation_fragment: dellos9
+options:
+ lines:
+ description:
+ - The ordered set of commands that should be configured in the
+ section. The commands must be the exact same commands as found
+ in the device running-config. Be sure to note the configuration
+ command syntax as some commands are automatically modified by the
+ device config parser. This argument is mutually exclusive with I(src).
+ required: false
+ default: null
+ aliases: ['commands']
+ parents:
+ description:
+ - The ordered set of parents that uniquely identify the section
+ the commands should be checked against. If the parents argument
+ is omitted, the commands are checked against the set of top
+ level or global commands.
+ required: false
+ default: null
+ src:
+ description:
+ - Specifies the source path to the file that contains the configuration
+ or configuration template to load. The path to the source file can
+ either be the full path on the Ansible control host or a relative
+ path from the playbook or role root dir. This argument is mutually
+ exclusive with I(lines).
+ required: false
+ default: null
+ before:
+ description:
+ - The ordered set of commands to push on to the command stack if
+ a change needs to be made. This allows the playbook designer
+ the opportunity to perform configuration commands prior to pushing
+ any changes without affecting how the set of commands are matched
+ against the system.
+ required: false
+ default: null
+ after:
+ description:
+ - The ordered set of commands to append to the end of the command
+ stack if a change needs to be made. Just like with I(before) this
+ allows the playbook designer to append a set of commands to be
+ executed after the command set.
+ required: false
+ default: null
+ match:
+ description:
+ - Instructs the module on the way to perform the matching of
+ the set of commands against the current device config. If
+ match is set to I(line), commands are matched line by line. If
+ match is set to I(strict), command lines are matched with respect
+ to position. If match is set to I(exact), command lines
+ must be an equal match. Finally, if match is set to I(none), the
+ module will not attempt to compare the source configuration with
+ the running configuration on the remote device.
+ required: false
+ default: line
+ choices: ['line', 'strict', 'exact', 'none']
+ replace:
+ description:
+ - Instructs the module on the way to perform the configuration
+ on the device. If the replace argument is set to I(line) then
+ the modified lines are pushed to the device in configuration
+ mode. If the replace argument is set to I(block) then the entire
+ command block is pushed to the device in configuration mode if any
+ line is not correct.
+ required: false
+ default: line
+ choices: ['line', 'block']
+ update:
+ description:
+ - The I(update) argument controls how the configuration statements
+ are processed on the remote device. Valid choices for the I(update)
+ argument are I(merge) and I(check). When the argument is set to
+ I(merge), the configuration changes are merged with the current
+ device running configuration. When the argument is set to I(check)
+ the configuration updates are determined but not actually configured
+ on the remote device.
+ required: false
+ default: merge
+ choices: ['merge', 'check']
+ save:
+ description:
+ - The C(save) argument instructs the module to save the running-
+ config to the startup-config at the conclusion of the module
+ running. If check mode is specified, this argument is ignored.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+ config:
+ description:
+ - The C(config) argument allows the playbook designer to supply
+ the base configuration to be used to validate configuration
+ changes necessary. If this argument is provided, the module
+ will not download the running-config from the remote node.
+ required: false
+ default: null
+ backup:
+ description:
+ - This argument will cause the module to create a full backup of
+ the current C(running-config) from the remote device before any
+ changes are made. The backup file is written to the C(backup)
+ folder in the playbook root directory. If the directory does not
+ exist, it is created.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+
+notes:
+ - This module requires Dell OS9 version 9.10.0.1P13 or above.
+
+ - This module requires the ssh connection rate limit to be increased.
+ Use the following command I(ip ssh connection-rate-limit 60)
+ to configure the same. This can be done via the M(dellos9_config) module
+ as well.
+"""
+
+EXAMPLES = """
+- dellos9_config:
+ lines: ['hostname {{ inventory_hostname }}']
+ provider: "{{ cli }}"
+
+- dellos9_config:
+ lines:
+ - 10 permit ip host 1.1.1.1 any log
+ - 20 permit ip host 2.2.2.2 any log
+ - 30 permit ip host 3.3.3.3 any log
+ - 40 permit ip host 4.4.4.4 any log
+ - 50 permit ip host 5.5.5.5 any log
+ parents: ['ip access-list extended test']
+ before: ['no ip access-list extended test']
+ match: exact
+ provider: "{{ cli }}"
+
+- dellos9_config:
+ lines:
+ - 10 permit ip host 1.1.1.1 any log
+ - 20 permit ip host 2.2.2.2 any log
+ - 30 permit ip host 3.3.3.3 any log
+ - 40 permit ip host 4.4.4.4 any log
+ parents: ['ip access-list extended test']
+ before: ['no ip access-list extended test']
+ replace: block
+ provider: "{{ cli }}"
+
+"""
+
+RETURN = """
+updates:
+ description: The set of commands that will be pushed to the remote device
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+responses:
+ description: The set of responses from issuing the commands on the device
+ returned: when not check_mode
+ type: list
+ sample: ['...', '...']
+
+saved:
+ description: Returns whether the configuration is saved to the startup
+ configuration or not.
+ returned: when not check_mode
+ type: bool
+ sample: True
+
+"""
+from ansible.module_utils.netcfg import NetworkConfig, dumps
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.dellos9 import get_config, get_sublevel_config
+
+
+def get_candidate(module):
+ candidate = NetworkConfig(indent=1)
+ if module.params['src']:
+ candidate.load(module.params['src'])
+ elif module.params['lines']:
+ parents = module.params['parents'] or list()
+ candidate.add(module.params['lines'], parents=parents)
+ return candidate
+
+
+def main():
+
+ argument_spec = dict(
+ lines=dict(aliases=['commands'], type='list'),
+ parents=dict(type='list'),
+
+ src=dict(type='path'),
+
+ before=dict(type='list'),
+ after=dict(type='list'),
+
+ match=dict(default='line',
+ choices=['line', 'strict', 'exact', 'none']),
+ replace=dict(default='line', choices=['line', 'block']),
+
+ update=dict(choices=['merge', 'check'], default='merge'),
+ save=dict(type='bool', default=False),
+ config=dict(),
+ backup=dict(type='bool', default=False)
+ )
+
+ mutually_exclusive = [('lines', 'src')]
+
+ module = NetworkModule(argument_spec=argument_spec,
+ connect_on_load=False,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True)
+
+ parents = module.params['parents'] or list()
+
+ match = module.params['match']
+ replace = module.params['replace']
+ result = dict(changed=False, saved=False)
+
+ candidate = get_candidate(module)
+
+ if match != 'none':
+ config = get_config(module)
+ if parents:
+ contents = get_sublevel_config(config, module)
+ config = NetworkConfig(contents=contents, indent=1)
+ configobjs = candidate.difference(config, match=match, replace=replace)
+
+ else:
+ configobjs = candidate.items
+
+ if module.params['backup']:
+ result['__backup__'] = module.cli('show running-config')[0]
+
+ commands = list()
+ if configobjs:
+ commands = dumps(configobjs, 'commands')
+ commands = commands.split('\n')
+
+ if module.params['before']:
+ commands[:0] = module.params['before']
+
+ if module.params['after']:
+ commands.extend(module.params['after'])
+
+ if not module.check_mode and module.params['update'] == 'merge':
+ response = module.config.load_config(commands)
+ result['responses'] = response
+
+ if module.params['save']:
+ module.config.save_config()
+ result['saved'] = True
+
+ result['changed'] = True
+
+ result['updates'] = commands
+
+ module.exit_json(**result)
+
+if __name__ == '__main__':
+ main()
diff --git a/network/dellos9/dellos9_facts.py b/network/dellos9/dellos9_facts.py
new file mode 100644
index 00000000000..fe752ac373f
--- /dev/null
+++ b/network/dellos9/dellos9_facts.py
@@ -0,0 +1,567 @@
+#!/usr/bin/python
+#
+# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
+#
+# Copyright (c) 2016 Dell Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: dellos9_facts
+version_added: "2.2"
+author: "Dhivya P (@dhivyap)"
+short_description: Collect facts from remote devices running Dell OS9
+description:
+ - Collects a base set of device facts from a remote device that
+ is running Dell OS9. This module prepends all of the
+ base network fact keys with C(ansible_net_). The facts
+ module will always collect a base set of facts from the device
+ and can enable or disable collection of additional facts.
+extends_documentation_fragment: dellos9
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will restrict the facts collected
+ to a given subset. Possible values for this argument include
+ all, hardware, config, and interfaces. Can specify a list of
+ values to include a larger subset. Values can also be used
+ with an initial C(M(!)) to specify that a specific subset should
+ not be collected.
+ required: false
+ default: '!config'
+notes:
+ - This module requires Dell OS9 version 9.10.0.1P13 or above.
+
+  - This module requires increasing the SSH connection rate limit.
+    Use the command I(ip ssh connection-rate-limit 60)
+    to configure this. This can also be done with the M(dnos_config)
+    module.
+"""
+
+EXAMPLES = """
+# Collect all facts from the device
+- dellos9_facts:
+ gather_subset: all
+
+# Collect only the config and default facts
+- dellos9_facts:
+ gather_subset:
+ - config
+
+# Do not collect hardware facts
+- dellos9_facts:
+ gather_subset:
+ - "!hardware"
+"""
+
+RETURN = """
+ansible_net_gather_subset:
+ description: The list of fact subsets collected from the device
+ returned: always
+ type: list
+
+# default
+ansible_net_model:
+ description: The model name returned from the device
+ returned: always
+ type: str
+ansible_net_serialnum:
+ description: The serial number of the remote device
+ returned: always
+ type: str
+ansible_net_version:
+ description: The operating system version running on the remote device
+ returned: always
+ type: str
+ansible_net_hostname:
+ description: The configured hostname of the device
+ returned: always
+ type: string
+ansible_net_image:
+ description: The image file the device is running
+ returned: always
+ type: string
+
+# hardware
+ansible_net_filesystems:
+ description: All file system names available on the device
+ returned: when hardware is configured
+ type: list
+ansible_net_memfree_mb:
+ description: The available free memory on the remote device in Mb
+ returned: when hardware is configured
+ type: int
+ansible_net_memtotal_mb:
+ description: The total memory on the remote device in Mb
+ returned: when hardware is configured
+ type: int
+
+# config
+ansible_net_config:
+ description: The current active config from the device
+ returned: when config is configured
+ type: str
+
+# interfaces
+ansible_net_all_ipv4_addresses:
+ description: All IPv4 addresses configured on the device
+ returned: when interfaces is configured
+ type: list
+ansible_net_all_ipv6_addresses:
+ description: All IPv6 addresses configured on the device
+ returned: when interfaces is configured
+ type: list
+ansible_net_interfaces:
+ description: A hash of all interfaces running on the system
+ returned: when interfaces is configured
+ type: dict
+ansible_net_neighbors:
+ description: The list of LLDP neighbors from the remote device
+ returned: when interfaces is configured
+ type: dict
+"""
+import re
+import itertools
+
+from ansible.module_utils.netcli import CommandRunner
+from ansible.module_utils.network import NetworkModule
+import ansible.module_utils.dellos9
+
+
+class FactsBase(object):
+
+ def __init__(self, runner):
+ self.runner = runner
+ self.facts = dict()
+
+ self.commands()
+
+
+class Default(FactsBase):
+
+ def commands(self):
+ self.runner.add_command('show version')
+ self.runner.add_command('show inventory')
+ self.runner.add_command('show running-config | grep hostname')
+
+ def populate(self):
+ data = self.runner.get_command('show version')
+ self.facts['version'] = self.parse_version(data)
+ self.facts['model'] = self.parse_model(data)
+ self.facts['image'] = self.parse_image(data)
+
+ data = self.runner.get_command('show inventory')
+ self.facts['serialnum'] = self.parse_serialnum(data)
+
+ data = self.runner.get_command('show running-config | grep hostname')
+ self.facts['hostname'] = self.parse_hostname(data)
+
+ def parse_version(self, data):
+ match = re.search(r'Software Version:\s*(.+)', data)
+ if match:
+ return match.group(1)
+
+ def parse_hostname(self, data):
+ match = re.search(r'^hostname (.+)', data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_model(self, data):
+ match = re.search(r'^System Type:\s*(.+)', data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_image(self, data):
+ match = re.search(r'image file is "(.+)"', data)
+ if match:
+ return match.group(1)
+
+ def parse_serialnum(self, data):
+ for line in data.split('\n'):
+ if line.startswith('*'):
+ match = re.search(
+ r'\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)', line, re.M)
+ if match:
+ return match.group(3)
+
+
+class Hardware(FactsBase):
+
+ def commands(self):
+ self.runner.add_command('show file-systems')
+ self.runner.add_command('show memory | except Processor')
+
+ def populate(self):
+ data = self.runner.get_command('show file-systems')
+ self.facts['filesystems'] = self.parse_filesystems(data)
+
+ data = self.runner.get_command('show memory | except Processor')
+ match = re.findall('\s(\d+)\s', data)
+ if match:
+ self.facts['memtotal_mb'] = int(match[0]) / 1024
+ self.facts['memfree_mb'] = int(match[2]) / 1024
+
+ def parse_filesystems(self, data):
+ return re.findall(r'\s(\S+):$', data, re.M)
+
+
+class Config(FactsBase):
+
+ def commands(self):
+ self.runner.add_command('show running-config')
+
+ def populate(self):
+ self.facts['config'] = self.runner.get_command('show running-config')
+
+
+class Interfaces(FactsBase):
+
+ def commands(self):
+ self.runner.add_command('show interfaces')
+ self.runner.add_command('show ipv6 interface')
+ self.runner.add_command('show lldp neighbors detail')
+
+ def populate(self):
+ self.facts['all_ipv4_addresses'] = list()
+ self.facts['all_ipv6_addresses'] = list()
+
+ data = self.runner.get_command('show interfaces')
+ interfaces = self.parse_interfaces(data)
+
+ for key in interfaces.keys():
+ if "ManagementEthernet" in key:
+ temp_parsed = interfaces[key]
+ del interfaces[key]
+ interfaces.update(self.parse_mgmt_interfaces(temp_parsed))
+
+ for key in interfaces.keys():
+ if "Vlan" in key:
+ temp_parsed = interfaces[key]
+ del interfaces[key]
+ interfaces.update(self.parse_vlan_interfaces(temp_parsed))
+
+ self.facts['interfaces'] = self.populate_interfaces(interfaces)
+
+ data = self.runner.get_command('show ipv6 interface')
+ if len(data) > 0:
+ data = self.parse_ipv6_interfaces(data)
+ self.populate_ipv6_interfaces(data)
+
+ data = self.runner.get_command('show inventory')
+ if 'LLDP' in self.get_protocol_list(data):
+ neighbors = self.runner.get_command('show lldp neighbors detail')
+ self.facts['neighbors'] = self.parse_neighbors(neighbors)
+
+ def get_protocol_list(self, data):
+ start = False
+ protocol_list = list()
+ for line in data.split('\n'):
+ match = re.search(r'Software Protocol Configured\s*', line)
+ if match:
+ start = True
+ continue
+ if start:
+ line = line.strip()
+ if line.isalnum():
+ protocol_list.append(line)
+ return protocol_list
+
+ def populate_interfaces(self, interfaces):
+ facts = dict()
+ for key, value in interfaces.iteritems():
+ intf = dict()
+ intf['description'] = self.parse_description(value)
+ intf['macaddress'] = self.parse_macaddress(value)
+ ipv4 = self.parse_ipv4(value)
+ intf['ipv4'] = self.parse_ipv4(value)
+ if ipv4:
+ self.add_ip_address(ipv4['address'], 'ipv4')
+
+ intf['mtu'] = self.parse_mtu(value)
+ intf['bandwidth'] = self.parse_bandwidth(value)
+ intf['mediatype'] = self.parse_mediatype(value)
+ intf['duplex'] = self.parse_duplex(value)
+ intf['lineprotocol'] = self.parse_lineprotocol(value)
+ intf['operstatus'] = self.parse_operstatus(value)
+ intf['type'] = self.parse_type(value)
+
+ facts[key] = intf
+ return facts
+
+ def populate_ipv6_interfaces(self, data):
+ for key, value in data.iteritems():
+ self.facts['interfaces'][key]['ipv6'] = list()
+ addresses = re.findall(r'\s+(.+), subnet', value, re.M)
+ subnets = re.findall(r', subnet is (\S+)', value, re.M)
+ for addr, subnet in itertools.izip(addresses, subnets):
+ ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
+ self.add_ip_address(addr.strip(), 'ipv6')
+ self.facts['interfaces'][key]['ipv6'].append(ipv6)
+
+ def add_ip_address(self, address, family):
+ if family == 'ipv4':
+ self.facts['all_ipv4_addresses'].append(address)
+ else:
+ self.facts['all_ipv6_addresses'].append(address)
+
+ def parse_neighbors(self, neighbors):
+ facts = dict()
+
+ for entry in neighbors.split(
+ '========================================================================'):
+ if entry == '':
+ continue
+
+ intf = self.parse_lldp_intf(entry)
+ if intf not in facts:
+ facts[intf] = list()
+ fact = dict()
+ fact['host'] = self.parse_lldp_host(entry)
+ fact['port'] = self.parse_lldp_port(entry)
+ facts[intf].append(fact)
+ return facts
+
+ def parse_interfaces(self, data):
+ parsed = dict()
+ newline_count = 0
+ interface_start = True
+
+ for line in data.split('\n'):
+ if interface_start:
+ newline_count = 0
+ if len(line) == 0:
+ newline_count += 1
+ if newline_count == 2:
+ interface_start = True
+ continue
+ else:
+ match = re.match(r'^(\S+) (\S+)', line)
+ if match and interface_start:
+ interface_start = False
+ key = match.group(0)
+ parsed[key] = line
+ else:
+ parsed[key] += '\n%s' % line
+ return parsed
+
+ def parse_mgmt_interfaces(self, data):
+ parsed = dict()
+ interface_start = True
+ for line in data.split('\n'):
+ match = re.match(r'^(\S+) (\S+)', line)
+ if "Time since" in line:
+ interface_start = True
+ parsed[key] += '\n%s' % line
+ continue
+ elif match and interface_start:
+ interface_start = False
+ key = match.group(0)
+ parsed[key] = line
+ else:
+ parsed[key] += '\n%s' % line
+ return parsed
+
+ def parse_vlan_interfaces(self, data):
+ parsed = dict()
+ interface_start = True
+ line_before_end = False
+ for line in data.split('\n'):
+ match = re.match(r'^(\S+) (\S+)', line)
+ match_endline = re.match(r'^\s*\d+ packets, \d+ bytes$', line)
+
+ if "Output Statistics" in line:
+ line_before_end = True
+ parsed[key] += '\n%s' % line
+ elif match_endline and line_before_end:
+ line_before_end = False
+ interface_start = True
+ parsed[key] += '\n%s' % line
+ elif match and interface_start:
+ interface_start = False
+ key = match.group(0)
+ parsed[key] = line
+ else:
+ parsed[key] += '\n%s' % line
+ return parsed
+
+ def parse_ipv6_interfaces(self, data):
+ parsed = dict()
+ for line in data.split('\n'):
+ if len(line) == 0:
+ continue
+ elif line[0] == ' ':
+ parsed[key] += '\n%s' % line
+ else:
+ match = re.match(r'^(\S+) (\S+)', line)
+ if match:
+ key = match.group(0)
+ parsed[key] = line
+ return parsed
+
+ def parse_description(self, data):
+ match = re.search(r'Description: (.+)$', data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_macaddress(self, data):
+ match = re.search(r'address is (\S+)', data)
+ if match:
+ if match.group(1) != "not":
+ return match.group(1)
+
+ def parse_ipv4(self, data):
+ match = re.search(r'Internet address is (\S+)', data)
+ if match:
+ if match.group(1) != "not":
+ addr, masklen = match.group(1).split('/')
+ return dict(address=addr, masklen=int(masklen))
+
+ def parse_mtu(self, data):
+ match = re.search(r'MTU (\d+)', data)
+ if match:
+ return int(match.group(1))
+
+ def parse_bandwidth(self, data):
+ match = re.search(r'LineSpeed (\d+)', data)
+ if match:
+ return int(match.group(1))
+
+ def parse_duplex(self, data):
+ match = re.search(r'(\w+) duplex', data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_mediatype(self, data):
+ media = re.search(r'(.+) media present, (.+)', data, re.M)
+ if media:
+ match = re.search(r'type is (.+)$', media.group(0), re.M)
+ return match.group(1)
+
+ def parse_type(self, data):
+ match = re.search(r'Hardware is (.+),', data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_lineprotocol(self, data):
+ match = re.search(r'line protocol is (\w+[ ]?\w*)\(?.*\)?$', data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_operstatus(self, data):
+ match = re.search(r'^(?:.+) is (.+),', data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_lldp_intf(self, data):
+ match = re.search(r'^\sLocal Interface (\S+\s\S+)', data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_lldp_host(self, data):
+ match = re.search(r'Remote System Name: (.+)$', data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_lldp_port(self, data):
+ match = re.search(r'Remote Port ID: (.+)$', data, re.M)
+ if match:
+ return match.group(1)
+
+
+FACT_SUBSETS = dict(
+ default=Default,
+ hardware=Hardware,
+ interfaces=Interfaces,
+ config=Config,
+)
+
+VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
+
+
+def main():
+ spec = dict(
+ gather_subset=dict(default=['!config'], type='list')
+ )
+
+ module = NetworkModule(argument_spec=spec, supports_check_mode=True)
+
+ gather_subset = module.params['gather_subset']
+
+ runable_subsets = set()
+ exclude_subsets = set()
+
+ for subset in gather_subset:
+ if subset == 'all':
+ runable_subsets.update(VALID_SUBSETS)
+ continue
+
+ if subset.startswith('!'):
+ subset = subset[1:]
+ if subset == 'all':
+ exclude_subsets.update(VALID_SUBSETS)
+ continue
+ exclude = True
+ else:
+ exclude = False
+
+ if subset not in VALID_SUBSETS:
+ module.fail_json(msg='Bad subset')
+
+ if exclude:
+ exclude_subsets.add(subset)
+ else:
+ runable_subsets.add(subset)
+
+ if not runable_subsets:
+ runable_subsets.update(VALID_SUBSETS)
+
+ runable_subsets.difference_update(exclude_subsets)
+ runable_subsets.add('default')
+
+ facts = dict()
+ facts['gather_subset'] = list(runable_subsets)
+
+ runner = CommandRunner(module)
+
+ instances = list()
+ for key in runable_subsets:
+ runs = FACT_SUBSETS[key](runner)
+ instances.append(runs)
+
+ runner.run()
+
+ try:
+ for inst in instances:
+ inst.populate()
+ facts.update(inst.facts)
+ except Exception:
+ module.exit_json(out=module.from_json(runner.items))
+
+ ansible_facts = dict()
+ for key, value in facts.iteritems():
+ key = 'ansible_net_%s' % key
+ ansible_facts[key] = value
+
+ module.exit_json(ansible_facts=ansible_facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/eos/eos_template.py b/network/eos/_eos_template.py
similarity index 91%
rename from network/eos/eos_template.py
rename to network/eos/_eos_template.py
index c5361876598..35be114cf7f 100644
--- a/network/eos/eos_template.py
+++ b/network/eos/_eos_template.py
@@ -15,11 +15,15 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: eos_template
version_added: "2.1"
-author: "Peter sprygada (@privateip)"
+author: "Peter Sprygada (@privateip)"
short_description: Manage Arista EOS device configurations
description:
- Manages network device configurations over SSH or eAPI. This module
@@ -28,6 +32,7 @@
by evaluating the current running-config and only pushing configuration
commands that are not already configured. The config source can
be a set of commands or a template.
+deprecated: Deprecated in 2.2. Use eos_config instead
extends_documentation_fragment: eos
options:
src:
@@ -111,19 +116,22 @@
responses:
description: The set of responses from issuing the commands on the device
- retured: when not check_mode
+ returned: when not check_mode
type: list
sample: ['...', '...']
"""
import re
+import ansible.module_utils.eos
+
+from ansible.module_utils.network import NetworkModule
from ansible.module_utils.netcfg import NetworkConfig, dumps
-from ansible.module_utils.eos import NetworkModule
def get_config(module):
config = module.params.get('config')
+ defaults = module.params['include_defaults']
if not config and not module.params['force']:
- config = module.config.get_config()
+ config = module.config.get_config(include_defaults=defaults)
return config
def filter_exit(commands):
@@ -198,7 +206,8 @@ def main():
commands = filter_exit(commands)
if commands:
if not module.check_mode:
- response = module.config(commands, replace=replace)
+ response = module.config.load_config(commands, replace=replace,
+ commit=True)
result['responses'] = response
result['changed'] = True
diff --git a/network/eos/eos_command.py b/network/eos/eos_command.py
index 2690ef9bf7c..110cfb60cea 100644
--- a/network/eos/eos_command.py
+++ b/network/eos/eos_command.py
@@ -16,12 +16,16 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: eos_command
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
-short_description: Run arbitrary commands on EOS device
+short_description: Run arbitrary commands on an Arista EOS device
description:
- Sends an arbitrary set of commands to an EOS node and returns the results
read from the device. This module includes an
@@ -88,10 +92,10 @@
password: admin
transport: cli
-- name: run show verion on remote devices
+- name: run show version on remote devices
eos_command:
commands: show version
- provider "{{ cli }}"
+ provider: "{{ cli }}"
- name: run show version and check to see if output contains Arista
eos_command:
@@ -106,7 +110,7 @@
- show interfaces
provider: "{{ cli }}"
-- name: run multiple commands and evalute the output
+- name: run multiple commands and evaluate the output
eos_command:
commands:
- show version
@@ -139,26 +143,32 @@
failed_conditions:
description: the conditionals that failed
- retured: failed
+ returned: failed
type: list
sample: ['...', '...']
"""
+
+import ansible.module_utils.eos
+
from ansible.module_utils.basic import get_exception
+from ansible.module_utils.network import NetworkModule, NetworkError
from ansible.module_utils.netcli import CommandRunner
-from ansible.module_utils.netcli import AddCommandError, FailedConditionsError
-from ansible.module_utils.eos import NetworkModule, NetworkError
+from ansible.module_utils.netcli import AddCommandError, AddConditionError
+from ansible.module_utils.netcli import FailedConditionsError
+from ansible.module_utils.netcli import FailedConditionalError
+from ansible.module_utils.six import string_types
VALID_KEYS = ['command', 'output', 'prompt', 'response']
def to_lines(stdout):
for item in stdout:
- if isinstance(item, basestring):
+ if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module):
for cmd in module.params['commands']:
- if isinstance(cmd, basestring):
+ if isinstance(cmd, string_types):
cmd = dict(command=cmd, output=None)
elif 'command' not in cmd:
module.fail_json(msg='command keyword argument is required')
@@ -182,7 +192,6 @@ def main():
)
module = NetworkModule(argument_spec=spec,
- connect_on_load=False,
supports_check_mode=True)
commands = list(parse_commands(module))
@@ -207,8 +216,13 @@ def main():
exc = get_exception()
warnings.append('duplicate command detected: %s' % cmd)
- for item in conditionals:
- runner.add_conditional(item)
+ try:
+ for item in conditionals:
+ runner.add_conditional(item)
+ except AddConditionError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), condition=exc.condition)
+
runner.retries = module.params['retries']
runner.interval = module.params['interval']
@@ -219,9 +233,12 @@ def main():
except FailedConditionsError:
exc = get_exception()
module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
+ except FailedConditionalError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), failed_conditional=exc.failed_conditional)
except NetworkError:
exc = get_exception()
- module.fail_json(msg=str(exc))
+ module.fail_json(msg=str(exc), **exc.kwargs)
result = dict(changed=False, stdout=list())
diff --git a/network/eos/eos_config.py b/network/eos/eos_config.py
index ab6aaac1eeb..731b11efb90 100644
--- a/network/eos/eos_config.py
+++ b/network/eos/eos_config.py
@@ -15,6 +15,11 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: eos_config
@@ -52,7 +57,7 @@
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
- This arugment is mutually exclusive with the I(lines) and
+ This argument is mutually exclusive with the I(lines) and
I(parents) arguments.
required: false
default: null
@@ -69,7 +74,7 @@
after:
description:
- The ordered set of commands to append to the end of the command
- stack if a changed needs to be made. Just like with I(before) this
+ stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
@@ -80,8 +85,10 @@
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
- to position. Finally if match is set to I(exact), command lines
- must be an equal match.
+ to position. If match is set to I(exact), command lines
+ must be an equal match. Finally, if match is set to I(none), the
+ module will not attempt to compare the source configuration with
+ the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
@@ -95,7 +102,7 @@
line is not correct.
required: false
default: line
- choices: ['line', 'block']
+ choices: ['line', 'block', 'config']
force:
description:
- The force argument instructs the module to not consider the
@@ -103,23 +110,21 @@
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
- the equivalient, set the match argument to none. This argument
+ the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: ['yes', 'no']
- update:
+ backup:
description:
- - The I(update) argument controls how the configuration statements
- are processed on the remote device. Valid choices for the I(update)
- argument are I(merge) I(replace) and I(check). When the argument is
- set to I(merge), the configuration changes are merged with the current
- device running configuration. When the argument is set to I(check)
- the configuration updates are determined but not actually configured
- on the remote device.
+ - This argument will cause the module to create a full backup of
+ the current C(running-config) from the remote device before any
+ changes are made. The backup file is written to the C(backup)
+ folder in the playbook root directory. If the directory does not
+ exist, it is created.
required: false
- default: merge
- choices: ['merge', 'replace', 'check']
+ default: no
+ choices: ['yes', 'no']
version_added: "2.2"
config:
description:
@@ -144,7 +149,7 @@
version_added: "2.2"
save:
description:
- - The I(save) argument will instruct the module to save the
+ - The C(save) argument instructs the module to save the
running-config to startup-config. This operation is performed
after any changes are made to the current running config. If
no changes are made, the configuration is still saved to the
@@ -153,16 +158,6 @@
required: false
default: false
version_added: "2.2"
- state:
- description:
- - The I(state) argument specifies the state of the config
- file on the device. When set to present, the configuration
- is updated based on the values of the module. When the value
- is set to absent, the device startup config is erased.
- required: true
- default: present
- choices: ['present', 'absent']
- version_added: "2.2"
"""
EXAMPLES = """
@@ -201,14 +196,24 @@
before: no ip access-list test
replace: block
provider: "{{ cli }}"
+
+- name: load configuration from file
+ eos_config:
+ src: eos.cfg
+ provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
- returned: always
+ returned: Only when lines is specified.
type: list
sample: ['...', '...']
+backup_path:
+ description: The full path to the backup file
+ returned: when backup is yes
+ type: path
+ sample: /playbooks/ansible/backup/eos_config.2016-07-16@22:28:34
"""
import time
@@ -216,21 +221,17 @@
from ansible.module_utils.eos import NetworkModule, NetworkError
from ansible.module_utils.basic import get_exception
-def invoke(name, *args, **kwargs):
- func = globals().get(name)
- if func:
- return func(*args, **kwargs)
-
def check_args(module, warnings):
- if module.params['save'] and module.check_mode:
- warnings.append('will not save configuration due to checkmode')
- if module.params['parents'] and module.params['src']:
- warnings.append('ignoring parents argument when src specified')
if module.params['force']:
warnings.append('The force argument is deprecated, please use '
'match=none instead. This argument will be '
'removed in the future')
+ if not module.connection.supports_sessions():
+ warnings.append('The current version of EOS on the remote device does '
+ 'not support configuration sessions. The commit '
+ 'argument will be ignored')
+
def get_candidate(module):
candidate = NetworkConfig(indent=3)
if module.params['src']:
@@ -240,122 +241,95 @@ def get_candidate(module):
candidate.add(module.params['lines'], parents=parents)
return candidate
-def get_config(module, result, defaults=False):
- defaults = module.params['defaults']
- if defaults is True:
- key = '__configall__'
- else:
- key = '__config__'
-
- contents = module.params['config'] or result.get(key)
-
+def get_config(module, defaults=False):
+ contents = module.params['config']
if not contents:
+ defaults = module.params['defaults']
contents = module.config.get_config(include_defaults=defaults)
- result[key] = contents
-
return NetworkConfig(indent=3, contents=contents)
-def backup_config(module, result):
- if '__config__' not in result:
- result['__config__'] = module.config.get_config()
- result['__backup__'] = result['__config__']
-
def load_config(module, commands, result):
- session = 'ansible_%s' % int(time.time())
-
- # save the sesion name in case we need later
- result['__session__'] = session
-
- replace = module.params['update'] == 'replace'
+ replace = module.params['replace'] == 'config'
commit = not module.check_mode
- diff = module.config.load_config(commands, session=session,
- replace=replace, commit=commit)
-
- # once the configuration is done, remove the config session and
- # remove the session name from the result
- module.cli(['no configure session %s' % session])
- del result['__session__']
+ diff = module.config.load_config(commands, replace=replace, commit=commit)
- result['diff'] = dict(prepared=diff)
- result['changed'] = not diff
+ if diff and module.connection.supports_sessions():
+ result['diff'] = dict(prepared=diff)
+ result['changed'] = True
+ elif diff:
+ result['changed'] = True
-def present(module, result):
+def run(module, result):
match = module.params['match']
replace = module.params['replace']
- update = module.params['update']
candidate = get_candidate(module)
- if match != 'none' and update != 'replace':
- config = get_config(module, result)
+ if match != 'none' and replace != 'config':
+ config = get_config(module)
configobjs = candidate.difference(config, match=match, replace=replace)
else:
- config = None
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
- if module.params['before']:
- commands[:0] = module.params['before']
+ if module.params['lines']:
+ if module.params['before']:
+ commands[:0] = module.params['before']
- if module.params['after']:
- commands.extend(module.params['after'])
+ if module.params['after']:
+ commands.extend(module.params['after'])
- result['updates'] = commands
+ result['updates'] = commands
- if update != 'check':
- load_config(module, commands, result)
+ module.log('commands: %s' % commands)
+ load_config(module, commands, result)
- if module.params['save'] and not module.check_mode:
- module.config.save_config()
-
-def absent(module, result):
- if not module.check_mode:
- module.cli('write erase')
- result['changed'] = True
+ if module.params['save']:
+ if not module.check_mode:
+ module.config.save_config()
+ result['changed'] = True
def main():
""" main entry point for module execution
"""
-
argument_spec = dict(
+ src=dict(type='path'),
+
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
- src=dict(type='path'),
-
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
- replace=dict(default='line', choices=['line', 'block']),
+ replace=dict(default='line', choices=['line', 'block', 'config']),
# this argument is deprecated in favor of setting match: none
# it will be removed in a future version
force=dict(default=False, type='bool'),
- update=dict(choices=['merge', 'replace', 'check'], default='merge'),
- backup=dict(type='bool', default=False),
-
config=dict(),
defaults=dict(type='bool', default=False),
- save=dict(default=False),
-
- state=dict(default='present', choices=['absent', 'present'])
+ backup=dict(type='bool', default=False),
+ save=dict(default=False, type='bool'),
)
mutually_exclusive = [('lines', 'src')]
+ required_if = [('match', 'strict', ['lines']),
+ ('match', 'exact', ['lines']),
+ ('replace', 'block', ['lines']),
+ ('replace', 'config', ['src'])]
+
module = NetworkModule(argument_spec=argument_spec,
- connect_on_load=False,
mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
supports_check_mode=True)
- state = module.params['state']
-
if module.params['force'] is True:
module.params['match'] = 'none'
@@ -365,17 +339,16 @@ def main():
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
- result['__backup__'] = backup_config(module, result)
+ result['__backup__'] = module.config.get_config()
try:
- invoke(state, module, result)
+ run(module, result)
except NetworkError:
exc = get_exception()
- module.fail_json(msg=str(exc))
+ module.fail_json(msg=str(exc), **exc.kwargs)
module.exit_json(**result)
if __name__ == '__main__':
main()
-
diff --git a/network/eos/eos_eapi.py b/network/eos/eos_eapi.py
index e4207682364..a5cfafabd69 100644
--- a/network/eos/eos_eapi.py
+++ b/network/eos/eos_eapi.py
@@ -17,12 +17,16 @@
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: eos_eapi
version_added: "2.1"
-author: "Chris Houseknecht (@chouseknecht)"
-short_description: Manage and configure eAPI.
+author: "Peter Sprygada (@privateip)"
+short_description: Manage and configure Arista EOS eAPI.
requirements:
- "EOS v4.12 or greater"
description:
@@ -35,259 +39,334 @@
- Requires EOS v4.12 or greater.
extends_documentation_fragment: eos
options:
- state:
- description:
- - A state of I(started) will
- enable eAPI access, and a state of I(stopped) will
- disable or shutdown all eAPI access.
- choices:
- - started
- - stopped
- required: false
- default: started
- http_port:
- description:
- - Port on which the HTTP server will listen.
- required: false
- default: 80
- https_port:
- description:
- - Port on which the HTTPS server will listen.
- required: false
- default: 443
- local_http_port:
- description:
- - Port on which the local HTTP server will listen.
- required: false
- default: 8080
- http:
- description:
- - Enable HTTP server access.
- required: false
- default: true
- choices:
- - yes
- - no
- aliases:
- - enable_http
- https:
- description:
- - Enable HTTPS server access.
- required: false
- default: true
- choices:
- - yes
- - no
- aliases:
- - enable_https
- local_http:
- description:
- - Enable local HTTP server access.
- required: false
- default: false
- choices:
- - yes
- - no
- aliases:
- - enable_local_http
- socket:
- description:
- - Enable Unix socket server access.
- required: false
- default: false
- choices:
- - yes
- - no
- aliases:
- - enable_socket
+ http:
+ description:
+ - The C(http) argument controls the operating state of the HTTP
+ transport protocol when eAPI is present in the running-config.
+ When the value is set to True, the HTTP protocol is enabled and
+ when the value is set to False, the HTTP protocol is disabled.
+ By default, when eAPI is first configured, the HTTP protocol is
+ disabled.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+ aliases: ['enable_http']
+ http_port:
+ description:
+ - Configures the HTTP port that will listen for connections when
+ the HTTP transport protocol is enabled. This argument accepts
+ integer values in the valid range of 1 to 65535.
+ required: false
+ default: 80
+ https:
+ description:
+ - The C(https) argument controls the operating state of the HTTPS
+ transport protocol when eAPI is present in the running-config.
+ When the value is set to True, the HTTPS protocol is enabled and
+ when the value is set to False, the HTTPS protocol is disabled.
+ By default, when eAPI is first configured, the HTTPS protocol is
+ enabled.
+ required: false
+ default: yes
+ choices: ['yes', 'no']
+ aliases: ['enable_https']
+ https_port:
+ description:
+ - Configures the HTTPS port that will listen for connections when
+ the HTTPS transport protocol is enabled. This argument accepts
+ integer values in the valid range of 1 to 65535.
+ required: false
+ default: 443
+ local_http:
+ description:
+ - The C(local_http) argument controls the operating state of the
+ local HTTP transport protocol when eAPI is present in the
+ running-config. When the value is set to True, the HTTP protocol
+ is enabled and restricted to connections from localhost only. When
+ the value is set to False, the HTTP local protocol is disabled.
+ - Note this value is independent of the C(http) argument
+ required: false
+ default: false
+ choices: ['yes', 'no']
+ aliases: ['enable_local_http']
+ local_http_port:
+ description:
+ - Configures the local HTTP port that will listen for connections
+ when the local HTTP transport protocol is enabled. This argument accepts
+ integer values in the valid range of 1 to 65535.
+ required: false
+ default: 8080
+ socket:
+ description:
+ - The C(socket) argument controls the operating state of the UNIX
+ Domain Socket used to receive eAPI requests. When the value
+ of this argument is set to True, the UDS will listen for eAPI
+ requests. When the value is set to False, the UDS will not be
+ available to handle requests. By default when eAPI is first
+ configured, the UDS is disabled.
+ required: false
+ default: false
+ choices: ['yes', 'no']
+ aliases: ['enable_socket']
+ vrf:
+ description:
+ - The C(vrf) argument will configure eAPI to listen for connections
+ in the specified VRF. By default, eAPI transports will listen
+ for connections in the global table. This value requires the
+ VRF to already be created otherwise the task will fail.
+ required: false
+ default: default
+ version_added: "2.2"
+ config:
+ description:
+ - The module, by default, will connect to the remote device and
+ retrieve the current running-config to use as a base for comparing
+ against the contents of source. There are times when it is not
+ desirable to have the task get the current running-config for
+ every task in a playbook. The I(config) argument allows the
+ implementer to pass in the configuration to use as the base
+ config for comparison.
+ required: false
+ default: null
+ version_added: "2.2"
+ state:
+ description:
+ - The C(state) argument controls the operational state of eAPI
+ on the remote device. When this argument is set to C(started),
+ eAPI is enabled to receive requests and when this argument is
+ C(stopped), eAPI is disabled and will not receive requests.
+ required: false
+ default: started
+ choices: ['started', 'stopped']
"""
EXAMPLES = """
- - name: Enable eAPI access with default configuration
- eos_eapi:
- state: started
- provider: {{ provider }}
-
- - name: Enable eAPI with no HTTP, HTTPS at port 9443, local HTTP at port 80, and socket enabled
- eos_eapi:
- state: started
- http: false
- https_port: 9443
- local_http: yes
- local_http_port: 80
- socket: yes
- provider: {{ provider }}
-
- - name: Shutdown eAPI access
- eos_eapi:
- state: stopped
- provider: {{ provider }}
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: admin
+ password: admin
+
+- name: Enable eAPI access with default configuration
+ eos_eapi:
+ state: started
+ provider: "{{ cli }}"
+
+- name: Enable eAPI with no HTTP, HTTPS at port 9443, local HTTP at port 80, and socket enabled
+ eos_eapi:
+ state: started
+ http: false
+ https_port: 9443
+ local_http: yes
+ local_http_port: 80
+ socket: yes
+ provider: "{{ cli }}"
+
+- name: Shutdown eAPI access
+ eos_eapi:
+ state: stopped
+ provider: "{{ cli }}"
"""
RETURN = """
-changed:
- description:
- - Indicates if commands were sent to the device.
- returned: always
- type: boolean
- sample: false
-
-commands:
- description:
- - Set of commands to be executed on remote device
- returned: always
- type: list
- sample: [
- 'management api http-commands',
- 'shutdown'
- ]
-
-_config:
- description:
- - Configuration found on the device prior to executing any commands.
- returned: always
- type: object
- sample: {...}
+updates:
+ description:
+ - Set of commands to be executed on remote device
+ returned: always
+ type: list
+ sample: ['management api http-commands', 'shutdown']
+urls:
+ description: Hash of URL endpoints eAPI is listening on per interface
+ returned: when eAPI is started
+ type: dict
+ sample: {'Management1': ['http://172.26.10.1:80']}
"""
-
-
-def http_commands(protocol, port, enable, config):
-
- started_config = config['{0}Server'.format(protocol)]
- commands = []
- changed = False
-
- if started_config.get('running'):
- if not enable:
- # turn off server
- commands.append('no protocol {0}'.format(protocol))
- changed = True
- elif started_config.get('port') != port:
- # update the port
- commands.append('protocol {0} port {1}'.format(protocol, port))
- changed = True
- elif not started_config.get('running') and enable:
- # turn on server
- commands.append('protocol {0} port {1}'.format(protocol, port))
- changed = True
-
- return commands, changed
-
-
-def execute_commands(module, commands):
-
- if not module.params.get('check_mode'):
- module.configure(commands)
-
-
-def config_server(module):
-
- state = module.params.get('state')
- local_http_port = module.params.get('local_http_port')
- socket= module.params.get('socket')
- local_http = module.params.get('local_http')
- config = module.from_json(module.execute(['show management api http-commands | json'])[0])
- result = dict(changed=False, _config=config, commands=[])
- commands = [
- 'management api http-commands'
- ]
-
- if not config.get('enabled'):
- if state == 'started':
- # turn on eAPI access
- commands.append('no shutdown')
- result['changed'] = True
- else:
- # state is stopped. nothing to do
- return result
-
- if config.get('enabled') and state == 'stopped':
- # turn off eAPI access and exit
- commands.append('shutdown')
- result['changed'] = True
- result['commands'] = commands
- execute_commands(module, commands)
- return result
-
- # http and https
- for protocol in ['http', 'https']:
- cmds, chg = http_commands(protocol, module.params['{0}_port'.format(protocol)],
- module.params['{0}'.format(protocol)], config)
- if chg:
- commands += cmds
- result['changed'] = True
-
- # local HTTP
- if config.get('localHttpServer').get('running'):
- if not local_http:
- # turn off local http server
- commands.append('no protocol http localhost')
- result['changed'] = True
- elif config.get('localHttpServer').get('port') != local_http_port:
- # update the local http port
- commands.append('protocol http localhost port {0}'.format(local_http_port))
- result['changed'] = True
-
- if not config.get('localHttpServer').get('running') and local_http:
- # turn on local http server
- commands.append('protocol http localhost port {0}'.format(local_http_port))
- result['changed'] = True
-
- # socket server
- if config.get('unixSocketServer').get('running') and not socket:
- # turn off unix socket
+import re
+import time
+
+import ansible.module_utils.eos
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.network import NetworkModule, NetworkError
+from ansible.module_utils.netcfg import NetworkConfig, dumps
+
+PRIVATE_KEYS_RE = re.compile('__.+__')
+
+
+def invoke(name, *args, **kwargs):
+ func = globals().get(name)
+ if func:
+ return func(*args, **kwargs)
+
+def get_instance(module):
+ try:
+ resp = module.cli('show management api http-commands', 'json')
+ return dict(
+ http=resp[0]['httpServer']['configured'],
+ http_port=resp[0]['httpServer']['port'],
+ https=resp[0]['httpsServer']['configured'],
+ https_port=resp[0]['httpsServer']['port'],
+ local_http=resp[0]['localHttpServer']['configured'],
+ local_http_port=resp[0]['localHttpServer']['port'],
+ socket=resp[0]['unixSocketServer']['configured'],
+ vrf=resp[0]['vrf']
+ )
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), **exc.kwargs)
+
+def started(module, instance, commands):
+ commands.append('no shutdown')
+ setters = set()
+ for key, value in module.argument_spec.iteritems():
+ if module.params[key] is not None:
+ setter = value.get('setter') or 'set_%s' % key
+ if setter not in setters:
+ setters.add(setter)
+ invoke(setter, module, instance, commands)
+
+def stopped(module, instance, commands):
+ commands.append('shutdown')
+
+def set_protocol_http(module, instance, commands):
+ port = module.params['http_port']
+ if not 1 <= port <= 65535:
+ module.fail_json(msg='http_port must be between 1 and 65535')
+ elif any((module.params['http'], instance['http'])):
+ commands.append('protocol http port %s' % port)
+ elif module.params['http'] is False:
+ commands.append('no protocol http')
+
+def set_protocol_https(module, instance, commands):
+ port = module.params['https_port']
+ if not 1 <= port <= 65535:
+ module.fail_json(msg='https_port must be between 1 and 65535')
+ elif any((module.params['https'], instance['https'])):
+ commands.append('protocol https port %s' % port)
+ elif module.params['https'] is False:
+ commands.append('no protocol https')
+
+def set_local_http(module, instance, commands):
+ port = module.params['local_http_port']
+ if not 1 <= port <= 65535:
+ module.fail_json(msg='local_http_port must be between 1 and 65535')
+ elif any((module.params['local_http'], instance['local_http'])):
+ commands.append('protocol http localhost port %s' % port)
+ elif module.params['local_http'] is False:
+ commands.append('no protocol http localhost port 8080')
+
+def set_socket(module, instance, commands):
+ if any((module.params['socket'], instance['socket'])):
+ commands.append('protocol unix-socket')
+ elif module.params['socket'] is False:
commands.append('no protocol unix-socket')
- result['changed'] = True
- if not config.get('unixSocketServer').get('running') and socket:
- # turn on unix socket
- commands.append('protocol unix-socket')
+def set_vrf(module, instance, commands):
+ vrf = module.params['vrf']
+ if vrf != 'default':
+ resp = module.cli(['show vrf'])
+ if vrf not in resp[0]:
+ module.fail_json(msg="vrf '%s' is not configured" % vrf)
+ commands.append('vrf %s' % vrf)
+
+def get_config(module):
+ contents = module.params['config']
+ if not contents:
+ cmd = 'show running-config all section management api http-commands'
+ contents = module.cli([cmd])
+ config = NetworkConfig(indent=3, contents=contents[0])
+ return config
+
+def load_config(module, instance, commands, result):
+ commit = not module.check_mode
+ diff = module.config.load_config(commands, commit=commit)
+ if diff:
+ result['diff'] = dict(prepared=diff)
result['changed'] = True
- if len(commands) > 1:
- # something requires change
- execute_commands(module, commands)
- result['commands'] = commands
-
- return result
+def load(module, instance, commands, result):
+ candidate = NetworkConfig(indent=3)
+ candidate.add(commands, parents=['management api http-commands'])
+
+ config = get_config(module)
+ configobjs = candidate.difference(config)
+
+ if configobjs:
+ commands = dumps(configobjs, 'commands').split('\n')
+ result['updates'] = commands
+ load_config(module, instance, commands, result)
+
+def clean_result(result):
+ # strip out any keys that have two leading and two trailing
+ # underscore characters
+ for key in result.keys():
+ if PRIVATE_KEYS_RE.match(key):
+ del result[key]
+
+def collect_facts(module, result):
+ resp = module.cli(['show management api http-commands'], output='json')
+ facts = dict(eos_eapi_urls=dict())
+ for each in resp[0]['urls']:
+ intf, url = each.split(' : ')
+ key = str(intf).strip()
+ if key not in facts['eos_eapi_urls']:
+ facts['eos_eapi_urls'][key] = list()
+ facts['eos_eapi_urls'][key].append(str(url).strip())
+ result['ansible_facts'] = facts
-def check_version(module):
- config = module.from_json(module.execute(['show version | json'])[0])
- versions = config['version'].split('.')
- if int(versions[0]) < 4 or int(versions[1]) < 12:
- module.fail_json(msg="Device version {0} does not support eAPI. eAPI was introduced in EOS 4.12.")
def main():
""" main entry point for module execution
"""
argument_spec = dict(
- state=dict(default='started', choices=['stopped','started']),
- http_port=dict(default=80, type='int'),
- https_port=dict(default=443, type='int'),
- local_http_port=dict(default=8080, type='int'),
- http=dict(aliases=['enable_http'], default=True, type='bool'),
- https=dict(aliases=['enable_https'], default=True, type='bool'),
+ http=dict(aliases=['enable_http'], default=False, type='bool', setter='set_protocol_http'),
+ http_port=dict(default=80, type='int', setter='set_protocol_http'),
+
+ https=dict(aliases=['enable_https'], default=True, type='bool', setter='set_protocol_https'),
+ https_port=dict(default=443, type='int', setter='set_protocol_https'),
+
+ local_http=dict(aliases=['enable_local_http'], default=False, type='bool', setter='set_local_http'),
+ local_http_port=dict(default=8080, type='int', setter='set_local_http'),
+
socket=dict(aliases=['enable_socket'], default=False, type='bool'),
- local_http=dict(aliases=['enable_local_http'], default=False, type='bool'),
+
+ vrf=dict(default='default'),
+
+ config=dict(),
# Only allow use of transport cli when configuring eAPI
- transport=dict(required=True, choices=['cli'])
+ transport=dict(default='cli', choices=['cli']),
+
+ state=dict(default='started', choices=['stopped', 'started']),
)
- module = get_module(argument_spec=argument_spec,
- supports_check_mode=True)
+ module = NetworkModule(argument_spec=argument_spec,
+ connect_on_load=False,
+ supports_check_mode=True)
+
+ state = module.params['state']
+
+ result = dict(changed=False)
+
+ commands = list()
+ instance = get_instance(module)
- check_version(module)
+ invoke(state, module, instance, commands)
- result = config_server(module)
+ try:
+ load(module, instance, commands, result)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), **exc.kwargs)
- return module.exit_json(**result)
+ collect_facts(module, result)
+ clean_result(result)
+ module.exit_json(**result)
-from ansible.module_utils.basic import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.eos import *
if __name__ == '__main__':
main()
diff --git a/network/eos/eos_facts.py b/network/eos/eos_facts.py
index affd00375d2..5e7cb2a7bee 100644
--- a/network/eos/eos_facts.py
+++ b/network/eos/eos_facts.py
@@ -16,12 +16,16 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: eos_facts
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
-short_description: Collect facts from remote devices running EOS
+short_description: Collect facts from remote devices running Arista EOS
description:
- Collects a base set of device facts from a remote device that
is running eos. This module prepends all of the
@@ -103,7 +107,7 @@
# hardware
ansible_net_filesystems:
- description: All file system names availabe on the device
+ description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_memfree_mb:
@@ -141,8 +145,8 @@
"""
import re
-from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcli import CommandRunner, AddCommandError
+from ansible.module_utils.six import iteritems
from ansible.module_utils.eos import NetworkModule
@@ -163,6 +167,10 @@ def __init__(self, runner):
self.load_commands()
+ def load_commands(self):
+ raise NotImplementedError
+
+
class Default(FactsBase):
SYSTEM_MAP = {
@@ -178,7 +186,7 @@ def load_commands(self):
def populate(self):
data = self.runner.get_command('show version', 'json')
- for key, value in self.SYSTEM_MAP.iteritems():
+ for key, value in iteritems(self.SYSTEM_MAP):
if key in data:
self.facts[value] = data[key]
@@ -256,10 +264,10 @@ def populate(self):
def populate_interfaces(self, data):
facts = dict()
- for key, value in data['interfaces'].iteritems():
+ for key, value in iteritems(data['interfaces']):
intf = dict()
- for remote, local in self.INTERFACE_MAP.iteritems():
+ for remote, local in iteritems(self.INTERFACE_MAP):
if remote in value:
intf[local] = value[remote]
@@ -336,7 +344,8 @@ def main():
exclude = False
if subset not in VALID_SUBSETS:
- module.fail_json(msg='Bad subset')
+ module.fail_json(msg='Subset must be one of [%s], got %s' %
+ (', '.join(VALID_SUBSETS), subset))
if exclude:
exclude_subsets.add(subset)
@@ -365,11 +374,10 @@ def main():
inst.populate()
facts.update(inst.facts)
except Exception:
- raise
module.exit_json(out=module.from_json(runner.items))
ansible_facts = dict()
- for key, value in facts.iteritems():
+ for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
@@ -378,4 +386,3 @@ def main():
if __name__ == '__main__':
main()
-
diff --git a/network/ios/ios_template.py b/network/ios/_ios_template.py
similarity index 92%
rename from network/ios/ios_template.py
rename to network/ios/_ios_template.py
index 067946829a7..62186320caa 100644
--- a/network/ios/ios_template.py
+++ b/network/ios/_ios_template.py
@@ -15,6 +15,11 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+
DOCUMENTATION = """
---
module: ios_template
@@ -28,6 +33,7 @@
by evaluating the current running-config and only pushing configuration
commands that are not already configured. The config source can
be a set of commands or a template.
+deprecated: Deprecated in 2.2. Use ios_config instead
extends_documentation_fragment: ios
options:
src:
@@ -93,7 +99,7 @@
src: config.j2
force: yes
-- name: provide the base configuration for comparision
+- name: provide the base configuration for comparison
ios_template:
host: hostname
username: foo
@@ -114,13 +120,15 @@
type: list
sample: ['...', '...']
"""
+import ansible.module_utils.ios
from ansible.module_utils.netcfg import NetworkConfig, dumps
-from ansible.module_utils.ios import NetworkModule, NetworkError
+from ansible.module_utils.ios import NetworkModule
def get_config(module):
config = module.params['config'] or dict()
+ defaults = module.params['include_defaults']
if not config and not module.params['force']:
- config = module.config.get_config()
+ config = module.config.get_config(include_defaults=defaults)
return config
def main():
diff --git a/network/ios/ios_command.py b/network/ios/ios_command.py
index 31ae8da9631..01878856c00 100644
--- a/network/ios/ios_command.py
+++ b/network/ios/ios_command.py
@@ -16,6 +16,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: ios_command
@@ -94,20 +98,20 @@
- name: run show version on remote devices
ios_command:
commands: show version
- provider "{{ cli }}"
+ provider: "{{ cli }}"
- name: run show version and check to see if output contains IOS
ios_command:
commands: show version
wait_for: result[0] contains IOS
- provider "{{ cli }}"
+ provider: "{{ cli }}"
- name: run multiple commands on remote nodes
ios_command:
commands:
- show version
- show interfaces
- provider "{{ cli }}"
+ provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
ios_command:
@@ -139,34 +143,34 @@
type: list
sample: ['...', '...']
"""
+import ansible.module_utils.ios
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcli import CommandRunner
from ansible.module_utils.netcli import AddCommandError, FailedConditionsError
-from ansible.module_utils.ios import NetworkModule, NetworkError
+from ansible.module_utils.network import NetworkModule, NetworkError
+from ansible.module_utils.six import string_types
-VALID_KEYS = ['command', 'output', 'prompt', 'response']
+VALID_KEYS = ['command', 'prompt', 'response']
def to_lines(stdout):
for item in stdout:
- if isinstance(item, basestring):
+ if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module):
for cmd in module.params['commands']:
- if isinstance(cmd, basestring):
+ if isinstance(cmd, string_types):
cmd = dict(command=cmd, output=None)
elif 'command' not in cmd:
module.fail_json(msg='command keyword argument is required')
- elif cmd.get('output') not in [None, 'text', 'json']:
- module.fail_json(msg='invalid output specified for command')
elif not set(cmd.keys()).issubset(VALID_KEYS):
module.fail_json(msg='unknown keyword specified')
yield cmd
def main():
spec = dict(
- # { command: , output: , prompt: , response: }
+ # { command: , prompt: , response: }
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
diff --git a/network/ios/ios_config.py b/network/ios/ios_config.py
index 5e71d79ff31..dd77449e5bc 100644
--- a/network/ios/ios_config.py
+++ b/network/ios/ios_config.py
@@ -16,6 +16,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: ios_config
@@ -63,7 +67,7 @@
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
- against the system
+ against the system.
required: false
default: null
after:
@@ -94,10 +98,19 @@
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
- line is not correct
+ line is not correct.
required: false
default: line
choices: ['line', 'block']
+ multiline_delimiter:
+ description:
+ - This argument is used when pushing a multiline configuration
+ element to the IOS device. It specifies the character to use
+ as the delimiting character. This only applies to the
+ configuration action
+ required: false
+ default: "@"
+ version_added: "2.3"
force:
description:
- The force argument instructs the module to not consider the
@@ -105,36 +118,11 @@
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
- the equivalient, set the match argument to none. This argument
+ the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
- choices: [ "true", "false" ]
- version_added: "2.2"
- update:
- description:
- - The I(update) argument controls how the configuration statements
- are processed on the remote device. Valid choices for the I(update)
- argument are I(merge) and I(check). When the argument is set to
- I(merge), the configuration changes are merged with the current
- device running configuration. When the argument is set to I(check)
- the configuration updates are determined but not actually configured
- on the remote device.
- required: false
- default: merge
- choices: ['merge', 'check']
- version_added: "2.2"
- commit:
- description:
- - This argument specifies the update method to use when applying the
- configuration changes to the remote node. If the value is set to
- I(merge) the configuration updates are merged with the running-
- config. If the value is set to I(check), no changes are made to
- the remote host.
- required: false
- default: merge
- choices: ['merge', 'check']
- version_added: "2.2"
+ choices: ["true", "false"]
backup:
description:
- This argument will cause the module to create a full backup of
@@ -148,14 +136,14 @@
version_added: "2.2"
config:
description:
- - The C(config) argument allows the playbook desginer to supply
+ - The C(config) argument allows the playbook designer to supply
the base configuration to be used to validate configuration
changes necessary. If this argument is provided, the module
will not download the running-config from the remote node.
required: false
default: null
version_added: "2.2"
- default:
+ defaults:
description:
- This argument specifies whether or not to collect all defaults
when getting the remote device running config. When enabled,
@@ -174,15 +162,6 @@
default: no
choices: ['yes', 'no']
version_added: "2.2"
- state:
- description:
- - This argument specifies whether or not the running-config is
- present on the remote device. When set to I(absent) the
- running-config on the remote device is erased.
- required: false
- default: no
- choices: ['yes', 'no']
- version_added: "2.2"
"""
EXAMPLES = """
@@ -226,7 +205,7 @@
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
- returned: always
+ returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
@@ -234,156 +213,172 @@
returned: when backup is yes
type: path
sample: /playbooks/ansible/backup/ios_config.2016-07-16@22:28:34
-responses:
- description: The set of responses from issuing the commands on the device
- returned: when not check_mode
- type: list
- sample: ['...', '...']
"""
import re
+import time
from ansible.module_utils.basic import get_exception
+from ansible.module_utils.six import iteritems
from ansible.module_utils.ios import NetworkModule, NetworkError
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.netcli import Command
-def invoke(name, *args, **kwargs):
- func = globals().get(name)
- if func:
- return func(*args, **kwargs)
def check_args(module, warnings):
- if module.params['parents']:
- if not module.params['lines'] or module.params['src']:
- warnings.append('ignoring unnecessary argument parents')
- if module.params['match'] == 'none' and module.params['replace']:
- warnings.append('ignorning unnecessary argument replace')
+ if module.params['multiline_delimiter']:
+ if len(module.params['multiline_delimiter']) != 1:
+ module.fail_json(msg='multiline_delimiter value can only be a '
+ 'single character')
if module.params['force']:
warnings.append('The force argument is deprecated, please use '
'match=none instead. This argument will be '
'removed in the future')
-def get_config(module, result):
- defaults = module.params['default']
- if defaults is True:
- key = '__configall__'
- else:
- key = '__config__'
+def extract_banners(config):
+ banners = {}
+ banner_cmds = re.findall(r'^banner (\w+)', config, re.M)
+ for cmd in banner_cmds:
+ regex = r'banner %s \^C(.+?)(?=\^C)' % cmd
+ match = re.search(regex, config, re.S)
+ if match:
+ key = 'banner %s' % cmd
+ banners[key] = match.group(1).strip()
+
+ for cmd in banner_cmds:
+ regex = r'banner %s \^C(.+?)(?=\^C)' % cmd
+ match = re.search(regex, config, re.S)
+ if match:
+ config = config.replace(str(match.group(1)), '')
+
+ config = re.sub(r'banner \w+ \^C\^C', '!! banner removed', config)
+ return (config, banners)
+
+def diff_banners(want, have):
+ candidate = {}
+ for key, value in iteritems(want):
+ if value != have.get(key):
+ candidate[key] = value
+ return candidate
- contents = module.params['config'] or result.get(key)
+def load_banners(module, banners):
+ delimiter = module.params['multiline_delimiter']
+ for key, value in iteritems(banners):
+ key += ' %s' % delimiter
+ for cmd in ['config terminal', key, value, delimiter, 'end']:
+ cmd += '\r'
+ module.connection.shell.shell.sendall(cmd)
+ time.sleep(1)
+ module.connection.shell.receive()
+def get_config(module, result):
+ contents = module.params['config']
if not contents:
+ defaults = module.params['defaults']
contents = module.config.get_config(include_defaults=defaults)
- result[key] = contents
- return NetworkConfig(indent=1, contents=contents)
+ contents, banners = extract_banners(contents)
+ return NetworkConfig(indent=1, contents=contents), banners
def get_candidate(module):
candidate = NetworkConfig(indent=1)
+ banners = {}
+
if module.params['src']:
- candidate.load(module.params['src'])
+ src, banners = extract_banners(module.params['src'])
+ candidate.load(src)
+
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
- return candidate
-
-def load_backup(module):
- try:
- module.cli(['exit', 'config replace flash:/ansible-rollback force'])
- except NetworkError:
- module.fail_json(msg='unable to rollback configuration')
-
-def backup_config(module):
- cmd = 'copy running-config flash:/ansible-rollback'
- cmd = Command(cmd, prompt=re.compile('\? $'), response='\n')
- module.cli(cmd)
-def load_config(module, commands, result):
- if not module.check_mode and module.params['update'] != 'check':
- module.config(commands)
- result['changed'] = module.params['update'] != 'check'
- result['updates'] = commands.split('\n')
+ return candidate, banners
-def present(module, result):
+def run(module, result):
match = module.params['match']
replace = module.params['replace']
+ path = module.params['parents']
- candidate = get_candidate(module)
+ candidate, want_banners = get_candidate(module)
if match != 'none':
- config = get_config(module, result)
- configobjs = candidate.difference(config, match=match, replace=replace)
+ config, have_banners = get_config(module, result)
+ path = module.params['parents']
+ configobjs = candidate.difference(config, path=path,match=match,
+ replace=replace)
else:
- config = None
configobjs = candidate.items
+ have_banners = {}
- if configobjs:
- commands = dumps(configobjs, 'commands')
+ banners = diff_banners(want_banners, have_banners)
- if module.params['before']:
- commands[:0] = module.params['before']
+ if configobjs or banners:
+ commands = dumps(configobjs, 'commands').split('\n')
- if module.params['after']:
- commands.extend(module.params['after'])
+ if module.params['lines']:
+ if module.params['before']:
+ commands[:0] = module.params['before']
- # create a backup copy of the current running-config on
- # device flash drive
- backup_config(module)
+ if module.params['after']:
+ commands.extend(module.params['after'])
+
+ result['updates'] = commands
+ result['banners'] = banners
# send the configuration commands to the device and merge
# them with the current running config
- load_config(module, commands, result)
-
- # remove the backup copy of the running-config since its
- # no longer needed
- module.cli('delete /force flash:/ansible-rollback')
+ if not module.check_mode:
+ if commands:
+ module.config(commands)
+ if banners:
+ load_banners(module, banners)
- if module.params['save'] and not module.check_mode:
- module.config.save_config()
+ result['changed'] = True
-def absent(module, result):
- if not module.check_mode:
- module.cli('write erase')
- result['changed'] = True
+ if module.params['save']:
+ if not module.check_mode:
+ module.config.save_config()
+ result['changed'] = True
def main():
+ """ main entry point for module execution
+ """
argument_spec = dict(
+ src=dict(type='path'),
+
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
- src=dict(type='path'),
-
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block']),
+ multiline_delimiter=dict(default='@'),
# this argument is deprecated in favor of setting match: none
# it will be removed in a future version
force=dict(default=False, type='bool'),
- update=dict(choices=['merge', 'check'], default='merge'),
- backup=dict(type='bool', default=False),
-
config=dict(),
- default=dict(type='bool', default=False),
-
- save=dict(type='bool', default=False),
+ defaults=dict(type='bool', default=False),
- state=dict(choices=['present', 'absent'], default='present')
+ backup=dict(type='bool', default=False),
+ save=dict(default=False, type='bool'),
)
mutually_exclusive = [('lines', 'src')]
+ required_if = [('match', 'strict', ['lines']),
+ ('match', 'exact', ['lines']),
+ ('replace', 'block', ['lines'])]
+
module = NetworkModule(argument_spec=argument_spec,
connect_on_load=False,
mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
supports_check_mode=True)
- state = module.params['state']
-
if module.params['force'] is True:
module.params['match'] = 'none'
@@ -396,12 +391,13 @@ def main():
result['__backup__'] = module.config.get_config()
try:
- invoke(state, module, result)
+ run(module, result)
except NetworkError:
- load_backup(module)
exc = get_exception()
+ module.disconnect()
module.fail_json(msg=str(exc))
+ module.disconnect()
module.exit_json(**result)
diff --git a/network/ios/ios_facts.py b/network/ios/ios_facts.py
index d842c2b4c09..abe68ec69ce 100644
--- a/network/ios/ios_facts.py
+++ b/network/ios/ios_facts.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: ios_facts
@@ -35,26 +39,38 @@
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
- with an initial M(!) to specify that a specific subset should
+ with an initial C(!) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: cisco
+ password: cisco
+ transport: cli
+
# Collect all facts from the device
- ios_facts:
gather_subset: all
+ provider: "{{ cli }}"
# Collect only the config and default facts
- ios_facts:
gather_subset:
- config
+ provider: "{{ cli }}"
# Do not collect hardware facts
- ios_facts:
gather_subset:
- "!hardware"
+ provider: "{{ cli }}"
"""
RETURN = """
@@ -87,7 +103,7 @@
# hardware
ansible_net_filesystems:
- description: All file system names availabe on the device
+ description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_memfree_mb:
@@ -124,41 +140,38 @@
type: dict
"""
import re
+import itertools
-from ansible.module_utils.basic import get_exception
-from ansible.module_utils.netcli import CommandRunner, AddCommandError
-from ansible.module_utils.ios import NetworkModule
-
+import ansible.module_utils.ios
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import zip
-def add_command(runner, command):
- try:
- runner.add_command(command)
- except AddCommandError:
- # AddCommandError is raised for any issue adding a command to
- # the runner. Silently ignore the exception in this case
- pass
class FactsBase(object):
- def __init__(self, runner):
- self.runner = runner
+ def __init__(self, module):
+ self.module = module
self.facts = dict()
+ self.failed_commands = list()
- self.commands()
+ def run(self, cmd):
+ try:
+ return self.module.cli(cmd)[0]
+ except:
+ self.failed_commands.append(cmd)
-class Default(FactsBase):
- def commands(self):
- add_command(self.runner, 'show version')
+class Default(FactsBase):
def populate(self):
- data = self.runner.get_command('show version')
-
- self.facts['version'] = self.parse_version(data)
- self.facts['serialnum'] = self.parse_serialnum(data)
- self.facts['model'] = self.parse_model(data)
- self.facts['image'] = self.parse_image(data)
- self.facts['hostname'] = self.parse_hostname(data)
+ data = self.run('show version')
+ if data:
+ self.facts['version'] = self.parse_version(data)
+ self.facts['serialnum'] = self.parse_serialnum(data)
+ self.facts['model'] = self.parse_model(data)
+ self.facts['image'] = self.parse_image(data)
+ self.facts['hostname'] = self.parse_hostname(data)
def parse_version(self, data):
match = re.search(r'Version (\S+),', data)
@@ -188,20 +201,17 @@ def parse_serialnum(self, data):
class Hardware(FactsBase):
- def commands(self):
- add_command(self.runner, 'dir all-filesystems | include Directory')
- add_command(self.runner, 'show version')
- add_command(self.runner, 'show memory statistics | include Processor')
-
def populate(self):
- data = self.runner.get_command('dir all-filesystems | include Directory')
- self.facts['filesystems'] = self.parse_filesystems(data)
+ data = self.run('dir | include Directory')
+ if data:
+ self.facts['filesystems'] = self.parse_filesystems(data)
- data = self.runner.get_command('show memory statistics | include Processor')
- match = re.findall('\s(\d+)\s', data)
- if match:
- self.facts['memtotal_mb'] = int(match[0]) / 1024
- self.facts['memfree_mb'] = int(match[1]) / 1024
+ data = self.run('show memory statistics | include Processor')
+ if data:
+ match = re.findall(r'\s(\d+)\s', data)
+ if match:
+ self.facts['memtotal_mb'] = int(match[0]) / 1024
+ self.facts['memfree_mb'] = int(match[1]) / 1024
def parse_filesystems(self, data):
return re.findall(r'^Directory of (\S+)/', data, re.M)
@@ -209,41 +219,37 @@ def parse_filesystems(self, data):
class Config(FactsBase):
- def commands(self):
- add_command(self.runner, 'show running-config')
-
def populate(self):
- self.facts['config'] = self.runner.get_command('show running-config')
+ data = self.run('show running-config')
+ if data:
+ self.facts['config'] = data
class Interfaces(FactsBase):
- def commands(self):
- add_command(self.runner, 'show interfaces')
- add_command(self.runner, 'show ipv6 interface')
- add_command(self.runner, 'show lldp')
- add_command(self.runner, 'show lldp neighbors detail')
-
def populate(self):
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
- data = self.runner.get_command('show interfaces')
- interfaces = self.parse_interfaces(data)
- self.facts['interfaces'] = self.populate_interfaces(interfaces)
+ data = self.run('show interfaces')
+ if data:
+ interfaces = self.parse_interfaces(data)
+ self.facts['interfaces'] = self.populate_interfaces(interfaces)
- data = self.runner.get_command('show ipv6 interface')
- if len(data) > 0:
+ data = self.run('show ipv6 interface')
+ if data:
data = self.parse_interfaces(data)
self.populate_ipv6_interfaces(data)
- if 'LLDP is not enabled' not in self.runner.get_command('show lldp'):
- neighbors = self.runner.get_command('show lldp neighbors detail')
- self.facts['neighbors'] = self.parse_neighbors(neighbors)
+ data = self.run('show lldp')
+ if 'LLDP is not enabled' not in data:
+ neighbors = self.run('show lldp neighbors detail')
+ if neighbors:
+ self.facts['neighbors'] = self.parse_neighbors(neighbors)
def populate_interfaces(self, interfaces):
facts = dict()
- for key, value in interfaces.iteritems():
+ for key, value in iteritems(interfaces):
intf = dict()
intf['description'] = self.parse_description(value)
intf['macaddress'] = self.parse_macaddress(value)
@@ -265,11 +271,11 @@ def populate_interfaces(self, interfaces):
return facts
def populate_ipv6_interfaces(self, data):
- for key, value in data.iteritems():
+ for key, value in iteritems(data):
self.facts['interfaces'][key]['ipv6'] = list()
addresses = re.findall(r'\s+(.+), subnet', value, re.M)
subnets = re.findall(r', subnet is (.+)$', value, re.M)
- for addr, subnet in itertools.izip(addresses, subnets):
+ for addr, subnet in zip(addresses, subnets):
ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv6')
self.facts['interfaces'][key]['ipv6'].append(ipv6)
@@ -296,6 +302,7 @@ def parse_neighbors(self, neighbors):
def parse_interfaces(self, data):
parsed = dict()
+ key = ''
for line in data.split('\n'):
if len(line) == 0:
continue
@@ -427,27 +434,27 @@ def main():
facts = dict()
facts['gather_subset'] = list(runable_subsets)
- runner = CommandRunner(module)
-
instances = list()
for key in runable_subsets:
- instances.append(FACT_SUBSETS[key](runner))
+ instances.append(FACT_SUBSETS[key](module))
- runner.run()
+ failed_commands = list()
try:
for inst in instances:
inst.populate()
+ failed_commands.extend(inst.failed_commands)
facts.update(inst.facts)
except Exception:
- module.exit_json(out=module.from_json(runner.items))
+ exc = get_exception()
+ module.fail_json(msg=str(exc))
ansible_facts = dict()
- for key, value in facts.iteritems():
+ for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
- module.exit_json(ansible_facts=ansible_facts)
+ module.exit_json(ansible_facts=ansible_facts, failed_commands=failed_commands)
if __name__ == '__main__':
diff --git a/network/iosxr/iosxr_template.py b/network/iosxr/_iosxr_template.py
similarity index 95%
rename from network/iosxr/iosxr_template.py
rename to network/iosxr/_iosxr_template.py
index 55a98fc2c57..315e693c3f2 100644
--- a/network/iosxr/iosxr_template.py
+++ b/network/iosxr/_iosxr_template.py
@@ -15,11 +15,16 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+
DOCUMENTATION = """
---
module: iosxr_template
version_added: "2.1"
-author: "Peter sprygada (@privateip)"
+author: "Peter Sprygada (@privateip)"
short_description: Manage Cisco IOSXR device configurations over SSH
description:
- Manages network device configurations over SSH. This module
@@ -28,6 +33,7 @@
by evaluating the current running-config and only pushing configuration
commands that are not already configured. The config source can
be a set of commands or a template.
+deprecated: Deprecated in 2.2. Use iosxr_config instead
extends_documentation_fragment: iosxr
options:
src:
@@ -96,7 +102,7 @@
responses:
description: The set of responses from issuing the commands on the device
- retured: when not check_mode
+ returned: when not check_mode
type: list
sample: ['...', '...']
"""
diff --git a/network/iosxr/iosxr_command.py b/network/iosxr/iosxr_command.py
index 3e97ab4b3dd..e266700bbf8 100644
--- a/network/iosxr/iosxr_command.py
+++ b/network/iosxr/iosxr_command.py
@@ -16,6 +16,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: iosxr_command
@@ -93,20 +97,20 @@
- name: run show version on remote devices
iosxr_command:
commands: show version
- provider "{{ cli }}"
+ provider: "{{ cli }}"
- name: run show version and check to see if output contains iosxr
iosxr_command:
commands: show version
wait_for: result[0] contains IOS-XR
- provider "{{ cli }}"
+ provider: "{{ cli }}"
- name: run multiple commands on remote nodes
iosxr_command:
commands:
- show version
- show interfaces
- provider "{{ cli }}"
+ provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
iosxr_command:
@@ -138,22 +142,24 @@
type: list
sample: ['...', '...']
"""
+import ansible.module_utils.iosxr
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcli import CommandRunner
from ansible.module_utils.netcli import AddCommandError, FailedConditionsError
-from ansible.module_utils.iosxr import NetworkModule, NetworkError
+from ansible.module_utils.network import NetworkModule, NetworkError
+from ansible.module_utils.six import string_types
VALID_KEYS = ['command', 'output', 'prompt', 'response']
def to_lines(stdout):
for item in stdout:
- if isinstance(item, basestring):
+ if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module):
for cmd in module.params['commands']:
- if isinstance(cmd, basestring):
+ if isinstance(cmd, string_types):
cmd = dict(command=cmd, output=None)
elif 'command' not in cmd:
module.fail_json(msg='command keyword argument is required')
diff --git a/network/iosxr/iosxr_config.py b/network/iosxr/iosxr_config.py
index 63c2a70218c..d686f83111a 100644
--- a/network/iosxr/iosxr_config.py
+++ b/network/iosxr/iosxr_config.py
@@ -16,6 +16,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: iosxr_config
@@ -63,13 +67,13 @@
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
- against the system
+ against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
- stack if a changed needs to be made. Just like with I(before) this
+ stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
@@ -94,10 +98,10 @@
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
- line is not correct
+ line is not correct.
required: false
default: line
- choices: ['line', 'block']
+ choices: ['line', 'block', 'config']
force:
description:
- The force argument instructs the module to not consider the
@@ -105,25 +109,12 @@
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
- the equivalent, set the match argument to none. This argument
+ the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: [ "yes", "no" ]
version_added: "2.2"
- update:
- description:
- - The I(update) argument controls how the configuration statements
- are processed on the remote device. Valid choices for the I(update)
- argument are I(merge) and I(check). When the argument is set to
- I(merge), the configuration changes are merged with the current
- device running configuration. When the argument is set to I(check)
- the configuration updates are determined but not actually configured
- on the remote device.
- required: false
- default: merge
- choices: ['merge', 'replace', 'check']
- version_added: "2.2"
config:
description:
- The module, by default, will connect to the remote device and
@@ -190,7 +181,7 @@
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
- returned: always
+ returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
@@ -206,19 +197,10 @@
DEFAULT_COMMIT_COMMENT = 'configured by iosxr_config'
-def invoke(name, *args, **kwargs):
- func = globals().get(name)
- if func:
- return func(*args, **kwargs)
-
def check_args(module, warnings):
- if module.params['parents']:
- if not module.params['lines'] or module.params['src']:
- warnings.append('ignoring unnecessary argument parents')
- if module.params['match'] == 'none' and module.params['replace']:
- warnings.append('ignoring unnecessary argument replace')
- if module.params['update'] == 'replace' and not module.params['src']:
- module.fail_json(msg='Must specify src when update is `replace`')
+ if module.params['comment']:
+ if len(module.params['comment']) > 60:
+ module.fail_json(msg='comment argument cannot be more than 60 characters')
if module.params['force']:
warnings.append('The force argument is deprecated, please use '
'match=none instead. This argument will be '
@@ -226,7 +208,7 @@ def check_args(module, warnings):
def get_config(module, result):
- contents = module.params['config'] or result.get('__config__')
+ contents = module.params['config']
if not contents:
contents = module.config.get_config()
return NetworkConfig(indent=1, contents=contents)
@@ -241,73 +223,80 @@ def get_candidate(module):
return candidate
def load_config(module, commands, result):
- replace = module.params['update'] == 'replace'
+ replace = module.params['replace'] == 'config'
comment = module.params['comment']
commit = not module.check_mode
+
diff = module.config.load_config(commands, replace=replace, commit=commit,
comment=comment)
- result['diff'] = dict(prepared=diff)
- result['changed'] = True
+
+ if diff:
+ result['diff'] = dict(prepared=diff)
+ result['changed'] = True
def run(module, result):
match = module.params['match']
replace = module.params['replace']
- update = module.params['update']
+ path = module.params['parents']
candidate = get_candidate(module)
- if match != 'none' and update != 'replace':
+ if match != 'none' and replace != 'config':
config = get_config(module, result)
- configobjs = candidate.difference(config, match=match, replace=replace)
+ configobjs = candidate.difference(config, path=path, match=match,
+ replace=replace)
else:
- config = None
configobjs = candidate.items
if configobjs:
- commands = dumps(configobjs, 'commands')
+ commands = dumps(configobjs, 'commands').split('\n')
- if module.params['before']:
- commands[:0] = module.params['before']
+ if module.params['lines']:
+ if module.params['before']:
+ commands[:0] = module.params['before']
- if module.params['after']:
- commands.extend(module.params['after'])
+ if module.params['after']:
+ commands.extend(module.params['after'])
- result['updates'] = commands.split('\n')
+ result['updates'] = commands
- if update != 'check':
- load_config(module, commands, result)
+ load_config(module, commands, result)
def main():
"""main entry point for module execution
"""
argument_spec = dict(
+ src=dict(type='path'),
+
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
- src=dict(type='path'),
-
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
- replace=dict(default='line', choices=['line', 'block']),
-
- update=dict(choices=['merge', 'replace', 'check'], default='merge'),
- backup=dict(type='bool', default=False),
- comment=dict(default=DEFAULT_COMMIT_COMMENT),
+ replace=dict(default='line', choices=['line', 'block', 'config']),
# this argument is deprecated in favor of setting match: none
# it will be removed in a future version
force=dict(default=False, type='bool'),
config=dict(),
+ backup=dict(type='bool', default=False),
+ comment=dict(default=DEFAULT_COMMIT_COMMENT),
)
mutually_exclusive = [('lines', 'src')]
+ required_if = [('match', 'strict', ['lines']),
+ ('match', 'exact', ['lines']),
+ ('replace', 'block', ['lines']),
+ ('replace', 'config', ['src'])]
+
module = NetworkModule(argument_spec=argument_spec,
connect_on_load=False,
mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
supports_check_mode=True)
if module.params['force'] is True:
@@ -319,9 +308,7 @@ def main():
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
- config = module.config.get_config()
- result['__config__'] = config
- result['__backup__'] = config
+ result['__backup__'] = module.config.get_config()
try:
run(module, result)
diff --git a/network/iosxr/iosxr_facts.py b/network/iosxr/iosxr_facts.py
index f045080ee63..9f93d06125c 100644
--- a/network/iosxr/iosxr_facts.py
+++ b/network/iosxr/iosxr_facts.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: iosxr_facts
@@ -79,7 +83,7 @@
# hardware
ansible_net_filesystems:
- description: All file system names availabe on the device
+ description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_memfree_mb:
@@ -116,11 +120,12 @@
type: dict
"""
import re
-import itertools
-from ansible.module_utils.basic import get_exception
+import ansible.module_utils.iosxr
from ansible.module_utils.netcli import CommandRunner, AddCommandError
-from ansible.module_utils.iosxr import NetworkModule
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import zip
def add_command(runner, command):
@@ -139,6 +144,9 @@ def __init__(self, runner):
self.commands()
+ def commands(self):
+ raise NotImplementedError
+
class Default(FactsBase):
def commands(self):
@@ -223,7 +231,7 @@ def populate(self):
def populate_interfaces(self, interfaces):
facts = dict()
- for key, value in interfaces.iteritems():
+ for key, value in iteritems(interfaces):
intf = dict()
intf['description'] = self.parse_description(value)
intf['macaddress'] = self.parse_macaddress(value)
@@ -244,11 +252,11 @@ def populate_interfaces(self, interfaces):
return facts
def populate_ipv6_interfaces(self, data):
- for key, value in data.iteritems():
+ for key, value in iteritems(data):
self.facts['interfaces'][key]['ipv6'] = list()
addresses = re.findall(r'\s+(.+), subnet', value, re.M)
subnets = re.findall(r', subnet is (.+)$', value, re.M)
- for addr, subnet in itertools.izip(addresses, subnets):
+ for addr, subnet in zip(addresses, subnets):
ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv6')
self.facts['interfaces'][key]['ipv6'].append(ipv6)
@@ -276,6 +284,7 @@ def parse_neighbors(self, neighbors):
def parse_interfaces(self, data):
parsed = dict()
+ key = ''
for line in data.split('\n'):
if len(line) == 0:
continue
@@ -416,11 +425,10 @@ def main():
inst.populate()
facts.update(inst.facts)
except Exception:
- raise
module.exit_json(out=module.from_json(runner.items))
ansible_facts = dict()
- for key, value in facts.iteritems():
+ for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
diff --git a/network/junos/junos_template.py b/network/junos/_junos_template.py
similarity index 80%
rename from network/junos/junos_template.py
rename to network/junos/_junos_template.py
index 8bbdbc19f81..bd0ecf371ea 100644
--- a/network/junos/junos_template.py
+++ b/network/junos/_junos_template.py
@@ -16,6 +16,11 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+
DOCUMENTATION = """
---
module: junos_template
@@ -27,6 +32,7 @@
from a template file onto a remote device running Junos. The
module will return the differences in configuration if the diff
option is specified on the Ansible command line
+deprecated: Deprecated in 2.2. Use junos_config instead
extends_documentation_fragment: junos
options:
src:
@@ -100,6 +106,10 @@
src: config.j2
action: overwrite
"""
+import ansible.module_utils.junos
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.network import NetworkModule, NetworkError
DEFAULT_COMMENT = 'configured by junos_template'
@@ -115,37 +125,45 @@ def main():
transport=dict(default='netconf', choices=['netconf'])
)
- module = get_module(argument_spec=argument_spec,
- supports_check_mode=True)
+ module = NetworkModule(argument_spec=argument_spec,
+ supports_check_mode=True)
comment = module.params['comment']
confirm = module.params['confirm']
commit = not module.check_mode
+ replace = False
+ overwrite = False
+
action = module.params['action']
+ if action == 'overwrite':
+ overwrite = True
+ elif action == 'replace':
+ replace = True
src = module.params['src']
fmt = module.params['config_format']
if action == 'overwrite' and fmt == 'set':
module.fail_json(msg="overwrite cannot be used when format is "
- "set per junos documentation")
+ "set per junos-pyez documentation")
results = dict(changed=False)
- results['_backup'] = str(module.get_config()).strip()
+ results['_backup'] = unicode(module.config.get_config()).strip()
- diff = module.load_config(src, action=action, comment=comment,
- format=fmt, commit=commit, confirm=confirm)
+ try:
+ diff = module.config.load_config(src, commit=commit, replace=replace,
+ confirm=confirm, comment=comment, config_format=fmt)
- if diff:
- results['changed'] = True
- results['diff'] = dict(prepared=diff)
+ if diff:
+ results['changed'] = True
+ results['diff'] = dict(prepared=diff)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), **exc.kwargs)
module.exit_json(**results)
-from ansible.module_utils.basic import *
-from ansible.module_utils.junos import *
-
if __name__ == '__main__':
main()
diff --git a/network/junos/junos_command.py b/network/junos/junos_command.py
index 6a7f47c6951..c54061834a0 100644
--- a/network/junos/junos_command.py
+++ b/network/junos/junos_command.py
@@ -16,6 +16,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: junos_command
@@ -34,9 +38,11 @@
options:
commands:
description:
- - An ordered set of CLI commands to be executed on the remote
- device. The output from the commands is then returned to
- the playbook in the task results.
+ - The C(commands) to send to the remote device over the Netconf
+ transport. The resulting output from the command
+ is returned. If the I(wait_for) argument is provided, the
+ module does not return until the condition is satisfied or
+ the number of I(retries) has been exceeded.
required: false
default: null
rpcs:
@@ -46,17 +52,29 @@
is return to the playbook via the modules results dictionary.
required: false
default: null
- waitfor:
+ wait_for:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
- the task to wait for a particular conditional or set of
- conditionals to be true before moving forward. If the
- conditional is not true by the configured retries, the
- :1
- task fails. See examples.
+ the task to wait for a particular conditional to be true
+ before moving forward. If the conditional is not true
+ by the configured retries, the task fails. See examples.
required: false
default: null
+ aliases: ['waitfor']
+ version_added: "2.2"
+ match:
+ description:
+ - The I(match) argument is used in conjunction with the
+ I(wait_for) argument to specify the match policy. Valid
+ values are C(all) or C(any). If the value is set to C(all)
+ then all conditionals in the I(wait_for) must be satisfied. If
+ the value is set to C(any) then only one of the values must be
+ satisfied.
+ required: false
+ default: all
+ choices: ['any', 'all']
+ version_added: "2.2"
retries:
description:
- Specifies the number of retries a command should by tried
@@ -89,12 +107,18 @@
"""
EXAMPLES = """
-# the required set of connection arguments have been purposely left off
-# the examples for brevity
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ netconf:
+ host: "{{ inventory_hostname }}"
+ username: ansible
+ password: Ansible
- name: run a set of commands
junos_command:
commands: ['show version', 'show ip route']
+ provider: "{{ netconf }}"
- name: run a command with a conditional applied to the second command
junos_command:
@@ -103,12 +127,14 @@
- show interfaces fxp0
waitfor:
- "result[1].interface-information.physical-interface.name eq fxp0"
+ provider: "{{ netconf }}"
- name: collect interface information using rpc
junos_command:
rpcs:
- "get_interface_information interface=em0 media=True"
- "get_interface_information interface=fxp0 media=True"
+ provider: "{{ netconf }}"
"""
RETURN = """
@@ -124,64 +150,67 @@
type: list
sample: [['...', '...'], ['...', '...']]
-xml:
- description: The raw XML reply from the device
- returned: when format is xml
- type: list
- sample: [['...', '...'], ['...', '...']]
-
failed_conditionals:
description: the conditionals that failed
- retured: failed
+ returned: failed
type: list
sample: ['...', '...']
"""
-import shlex
-
-def split(value):
- lex = shlex.shlex(value)
- lex.quotes = '"'
- lex.whitespace_split = True
- lex.commenters = ''
- return list(lex)
-
-def rpc_args(args):
- kwargs = dict()
- args = split(args)
- name = args.pop(0)
- for arg in args:
- key, value = arg.split('=')
- if str(value).upper() in ['TRUE', 'FALSE']:
- kwargs[key] = bool(value)
- elif re.match(r'^[0-9]+$', value):
- kwargs[key] = int(value)
- else:
- kwargs[key] = str(value)
- return (name, kwargs)
-def parse_rpcs(rpcs):
- parsed = list()
- for rpc in (rpcs or list()):
- parsed.append(rpc_args(rpc))
- return parsed
+import ansible.module_utils.junos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.network import NetworkModule, NetworkError
+from ansible.module_utils.netcli import CommandRunner
+from ansible.module_utils.netcli import AddCommandError, FailedConditionsError
+from ansible.module_utils.netcli import FailedConditionalError, AddConditionError
+from ansible.module_utils.junos import xml_to_json
+from ansible.module_utils.six import string_types
-def run_rpcs(module, items, format):
- response = list()
- for name, kwargs in items:
- kwargs['format'] = format
- result = module.connection.rpc(name, **kwargs)
- if format == 'text':
- response.append(result.text)
- else:
- response.append(result)
- return response
+VALID_KEYS = {
+ 'cli': frozenset(['command', 'output', 'prompt', 'response']),
+ 'rpc': frozenset(['command', 'output'])
+}
-def iterlines(stdout):
+
+def to_lines(stdout):
for item in stdout:
- if isinstance(item, basestring):
+ if isinstance(item, string_types):
item = str(item).split('\n')
yield item
+def parse(module, command_type):
+ if command_type == 'cli':
+ items = module.params['commands']
+ elif command_type == 'rpc':
+ items = module.params['rpcs']
+
+ parsed = list()
+ for item in (items or list()):
+ if isinstance(item, string_types):
+ item = dict(command=item, output=None)
+ elif 'command' not in item:
+ module.fail_json(msg='command keyword argument is required')
+ elif item.get('output') not in [None, 'text', 'xml']:
+ module.fail_json(msg='invalid output specified for command. '
+ 'Supported values are `text` or `xml`')
+ elif not set(item.keys()).issubset(VALID_KEYS[command_type]):
+ module.fail_json(msg='unknown command keyword specified. Valid '
+ 'values are %s' % ', '.join(VALID_KEYS[command_type]))
+
+ if not item['output']:
+ item['output'] = module.params['display']
+
+ item['command_type'] = command_type
+
+ # show configuration [options] will return as text
+ if item['command'].startswith('show configuration'):
+ item['output'] = 'text'
+
+ parsed.append(item)
+
+ return parsed
+
+
def main():
"""main entry point for Ansible module
"""
@@ -189,76 +218,87 @@ def main():
spec = dict(
commands=dict(type='list'),
rpcs=dict(type='list'),
- format=dict(default='xml', choices=['text', 'xml']),
- waitfor=dict(type='list'),
+
+ display=dict(default='xml', choices=['text', 'xml'],
+ aliases=['format', 'output']),
+
+ wait_for=dict(type='list', aliases=['waitfor']),
+ match=dict(default='all', choices=['all', 'any']),
+
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int'),
+
transport=dict(default='netconf', choices=['netconf'])
)
mutually_exclusive = [('commands', 'rpcs')]
- module = get_module(argument_spec=spec,
- mutually_exclusive=mutually_exclusive,
- supports_check_mode=True)
-
+ module = NetworkModule(argument_spec=spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True)
- commands = module.params['commands']
- rpcs = parse_rpcs(module.params['rpcs'])
+ commands = list()
+ for key in VALID_KEYS.keys():
+ commands.extend(list(parse(module, key)))
- encoding = module.params['format']
- retries = module.params['retries']
- interval = module.params['interval']
+ conditionals = module.params['wait_for'] or list()
+ warnings = list()
- try:
- queue = set()
- for entry in (module.params['waitfor'] or list()):
- queue.add(Conditional(entry))
- except AttributeError:
- exc = get_exception()
- module.fail_json(msg=exc.message)
-
- result = dict(changed=False)
+ runner = CommandRunner(module)
- while retries > 0:
- if commands:
- response = module.run_commands(commands, format=encoding)
+ for cmd in commands:
+ if module.check_mode and not cmd['command'].startswith('show'):
+ warnings.append('only show commands are supported when using '
+ 'check mode, not executing `%s`' % cmd['command'])
else:
- response = run_rpcs(module, rpcs, format=encoding)
+ if cmd['command'].startswith('co'):
+ module.fail_json(msg='junos_command does not support running '
+ 'config mode commands. Please use '
+ 'junos_config instead')
+ try:
+ runner.add_command(**cmd)
+ except AddCommandError:
+ exc = get_exception()
+ warnings.append('duplicate command detected: %s' % cmd)
- result['stdout'] = response
- xmlout = list()
+ try:
+ for item in conditionals:
+ runner.add_conditional(item)
+ except (ValueError, AddConditionError):
+ exc = get_exception()
+ module.fail_json(msg=str(exc), condition=exc.condition)
- for index in range(0, len(response)):
- if encoding == 'xml':
- xmlout.append(xml_to_string(response[index]))
- response[index] = xml_to_json(response[index])
+ runner.retries = module.params['retries']
+ runner.interval = module.params['interval']
+ runner.match = module.params['match']
- for item in list(queue):
- if item(response):
- queue.remove(item)
+ try:
+ runner.run()
+ except FailedConditionsError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
+ except FailedConditionalError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), failed_conditional=exc.failed_conditional)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc))
- if not queue:
- break
+ result = dict(changed=False, stdout=list())
- time.sleep(interval)
- retries -= 1
- else:
- failed_conditions = [item.raw for item in queue]
- module.fail_json(msg='timeout waiting for value', failed_conditions=failed_conditions)
+ for cmd in commands:
+ try:
+ output = runner.get_command(cmd['command'], cmd.get('output'))
+ except ValueError:
+ output = 'command not executed due to check_mode, see warnings'
+ result['stdout'].append(output)
- if xmlout:
- result['xml'] = xmlout
+ result['warnings'] = warnings
+ result['stdout_lines'] = list(to_lines(result['stdout']))
- result['stdout_lines'] = list(iterlines(result['stdout']))
module.exit_json(**result)
-from ansible.module_utils.basic import *
-from ansible.module_utils.netcfg import *
-from ansible.module_utils.junos import *
-
if __name__ == '__main__':
main()
-
diff --git a/network/junos/junos_config.py b/network/junos/junos_config.py
index 5a365a16647..a1212e974c3 100644
--- a/network/junos/junos_config.py
+++ b/network/junos/junos_config.py
@@ -16,30 +16,51 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: junos_config
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
-short_description: Manage configuration on remote devices running Junos
+short_description: Manage configuration on devices running Juniper JUNOS
description:
- - Provides an abstraction for working
- with the configuration running on remote Junos devices. It can perform
- operations that influence the configuration state.
- - This module provides an implementation for configuring Juniper
- JUNOS devices. The configuration statements must start with either
- `set` or `delete` and are compared against the current device
- configuration and only changes are pushed to the device.
+ - This module provides an implementation for working with the active
+ configuration running on Juniper JUNOS devices. It provides a set
+ of arguments for loading configuration, performing rollback operations
+ and zeroing the active configuration on the device.
extends_documentation_fragment: junos
options:
lines:
description:
- - The path to the config source. The source can be either a
- file with config or a template that will be merged during
- runtime. By default the task will search for the source
- file in role or playbook root folder in templates directory.
+ - This argument takes a list of C(set) or C(delete) configuration
+ lines to push into the remote device. Each line must start with
+ either C(set) or C(delete). This argument is mutually exclusive
+ with the I(src) argument.
+ required: false
+ default: null
+ src:
+ description:
+ - The I(src) argument provides a path to the configuration file
+ to load into the remote system. The path can either be a full
+ system path to the configuration file if the value starts with /
+ or relative to the root of the implemented role or playbook.
+ This argument is mutually exclusive with the I(lines) argument.
+ required: false
+ default: null
+ version_added: "2.2"
+ src_format:
+ description:
+ - The I(src_format) argument specifies the format of the configuration
+ found int I(src). If the I(src_format) argument is not provided,
+ the module will attempt to determine the format of the configuration
+ file specified in I(src).
required: false
default: null
+ choices: ['xml', 'set', 'text', 'json']
+ version_added: "2.2"
rollback:
description:
- The C(rollback) argument instructs the module to rollback the
@@ -79,131 +100,248 @@
replace the current configuration hierarchy with the one specified
in the corresponding hierarchy of the source configuration loaded
from this module.
- required: true
- choices:
- - yes
- - no
+ - Note this argument should be considered deprecated. To achieve
+ the equivalent, set the I(update) argument to C(replace). This argument
+ will be removed in a future release.
+ required: false
+ choices: ['yes', 'no']
default: false
+ backup:
+ description:
+ - This argument will cause the module to create a full backup of
+ the current C(running-config) from the remote device before any
+ changes are made. The backup file is written to the C(backup)
+ folder in the playbook root directory. If the directory does not
+ exist, it is created.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+ version_added: "2.2"
requirements:
- junos-eznc
notes:
- This module requires the netconf system service be enabled on
- the remote device being managed
+ the remote device being managed.
"""
EXAMPLES = """
-- name: load configuration lines in device
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ netconf:
+ host: "{{ inventory_hostname }}"
+ username: ansible
+ password: Ansible
+
+- name: load configure file into device
junos_config:
- lines:
- - set system host-name {{ inventory_hostname }}
- - delete interfaces ge-0/0/0 description
+ src: srx.cfg
comment: update config
+ provider: "{{ netconf }}"
- name: rollback the configuration to id 10
junos_config:
rollback: 10
+ provider: "{{ netconf }}"
- name: zero out the current configuration
junos_config:
zeroize: yes
+ provider: "{{ netconf }}"
-- name: confirm a candidate configuration
+- name: confirm a previous commit
junos_config:
+ provider: "{{ netconf }}"
"""
-import re
+RETURN = """
+backup_path:
+ description: The full path to the backup file
+ returned: when backup is yes
+ type: path
+ sample: /playbooks/ansible/backup/config.2016-07-16@22:28:34
+"""
+import json
+
+from xml.etree import ElementTree
+
+import ansible.module_utils.junos
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.network import NetworkModule, NetworkError
+from ansible.module_utils.netcfg import NetworkConfig
+
DEFAULT_COMMENT = 'configured by junos_config'
-def diff_config(candidate, config):
- updates = set()
+def guess_format(config):
+ try:
+ json.loads(config)
+ return 'json'
+ except ValueError:
+ pass
+
+ try:
+ ElementTree.fromstring(config)
+ return 'xml'
+ except ElementTree.ParseError:
+ pass
+
+ if config.startswith('set') or config.startswith('delete'):
+ return 'set'
+
+ return 'text'
+
+def config_to_commands(config):
+ set_format = config.startswith('set') or config.startswith('delete')
+ candidate = NetworkConfig(indent=4, contents=config, device_os='junos')
+ if not set_format:
+ candidate = [c.line for c in candidate.items]
+ commands = list()
+ # this filters out less specific lines
+ for item in candidate:
+ for index, entry in enumerate(commands):
+ if item.startswith(entry):
+ del commands[index]
+ break
+ commands.append(item)
- for line in candidate:
- parts = line.split()
- action = parts[0]
- cfgline = ' '.join(parts[1:])
+ else:
+ commands = str(candidate).split('\n')
- if action not in ['set', 'delete']:
- module.fail_json(msg='line must start with either `set` or `delete`')
+ return commands
- elif action == 'set' and cfgline not in config:
- updates.add(line)
+def diff_commands(commands, config):
+ config = [unicode(c).replace("'", '') for c in config]
- elif action == 'delete' and not config:
- updates.add(line)
+ updates = list()
+ visited = set()
- elif action == 'delete':
- for cfg in config:
- if cfg[4:].startswith(cfgline):
- updates.add(line)
+ for item in commands:
+ if len(item) > 0:
+ if not item.startswith('set') and not item.startswith('delete'):
+ raise ValueError('line must start with either `set` or `delete`')
- return list(updates)
+ elif item.startswith('set') and item[4:] not in config:
+ updates.append(item)
-def main():
+ elif item.startswith('delete'):
+ for entry in config:
+ if entry.startswith(item[7:]) and item not in visited:
+ updates.append(item)
+ visited.add(item)
- argument_spec = dict(
- lines=dict(type='list'),
- rollback=dict(type='int'),
- zeroize=dict(default=False, type='bool'),
- confirm=dict(default=0, type='int'),
- comment=dict(default=DEFAULT_COMMENT),
- replace=dict(default=False, type='bool'),
- transport=dict(default='netconf', choices=['netconf'])
- )
+ return updates
- mutually_exclusive = [('lines', 'rollback'), ('lines', 'zeroize'),
- ('rollback', 'zeroize')]
+def load_config(module, result):
+ candidate = module.params['lines'] or module.params['src']
+ if isinstance(candidate, basestring):
+ candidate = candidate.split('\n')
+
+ kwargs = dict()
+ kwargs['comment'] = module.params['comment']
+ kwargs['confirm'] = module.params['confirm']
+ kwargs['replace'] = module.params['replace']
+ kwargs['commit'] = not module.check_mode
+
+ if module.params['src']:
+ config_format = module.params['src_format'] or guess_format(str(candidate))
+ elif module.params['lines']:
+ config_format = 'set'
+ kwargs['config_format'] = config_format
- module = get_module(argument_spec=argument_spec,
- mutually_exclusive=mutually_exclusive,
- supports_check_mode=True)
+ # this is done to filter out `delete ...` statements which map to
+ # nothing in the config as that will cause an exception to be raised
+ if config_format == 'set':
+ config = module.config.get_config()
+ config = config_to_commands(config)
+ candidate = diff_commands(candidate, config)
+ diff = module.config.load_config(candidate, **kwargs)
+
+ if diff:
+ result['changed'] = True
+ result['diff'] = dict(prepared=diff)
+
+def rollback_config(module, result):
rollback = module.params['rollback']
- zeroize = module.params['zeroize']
- comment = module.params['comment']
- confirm = module.params['confirm']
+ kwargs = dict(comment=module.param['comment'],
+ commit=not module.check_mode)
+
+ diff = module.connection.rollback_config(rollback, **kwargs)
- if module.params['replace']:
- action = 'replace'
+ if diff:
+ result['changed'] = True
+ result['diff'] = dict(prepared=diff)
+
+def zeroize_config(module, result):
+ if not module.check_mode:
+ module.cli.run_commands('request system zeroize')
+ result['changed'] = True
+
+def confirm_config(module, result):
+ checkonly = module.check_mode
+ result['changed'] = module.connection.confirm_commit(checkonly)
+
+def run(module, result):
+ if module.params['rollback']:
+ return rollback_config(module, result)
+ elif module.params['zeroize']:
+ return zeroize_config(module, result)
+ elif not any((module.params['src'], module.params['lines'])):
+ return confirm_config(module, result)
else:
- action = 'merge'
+ return load_config(module, result)
- lines = module.params['lines']
- commit = not module.check_mode
- results = dict(changed=False)
+def main():
+ """ main entry point for module execution
+ """
+ argument_spec = dict(
+ lines=dict(type='list'),
- if lines:
- config = str(module.get_config(config_format='set')).split('\n')
- updates = diff_config(lines, config)
+ src=dict(type='path'),
+ src_format=dict(choices=['xml', 'text', 'set', 'json']),
+
+ # update operations
+ replace=dict(default=False, type='bool'),
+ confirm=dict(default=0, type='int'),
+ comment=dict(default=DEFAULT_COMMENT),
+
+ # config operations
+ backup=dict(type='bool', default=False),
+ rollback=dict(type='int'),
+ zeroize=dict(default=False, type='bool'),
+
+ transport=dict(default='netconf', choices=['netconf'])
+ )
+
+ mutually_exclusive = [('lines', 'rollback'), ('lines', 'zeroize'),
+ ('rollback', 'zeroize'), ('lines', 'src'),
+ ('src', 'zeroize'), ('src', 'rollback')]
- if updates:
- updates = '\n'.join(updates)
- diff = module.load_config(updates, action=action, comment=comment,
- format='set', commit=commit, confirm=confirm)
+ required_if = [('replace', True, ['src'])]
- if diff:
- results['changed'] = True
- results['diff'] = dict(prepared=diff)
+ module = NetworkModule(argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ supports_check_mode=True)
- elif rollback is not None:
- diff = module.rollback_config(rollback, commit=commit)
- if diff:
- results['changed'] = True
- results['diff'] = dict(prepared=diff)
+ result = dict(changed=False)
- elif zeroize:
- if not module.check_mode:
- module.run_commands('request system zeroize')
- results['changed'] = True
+ if module.params['backup']:
+ result['__backup__'] = module.config.get_config()
- module.exit_json(**results)
+ try:
+ run(module, result)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), **exc.kwargs)
+ module.exit_json(**result)
-from ansible.module_utils.basic import *
-from ansible.module_utils.junos import *
if __name__ == '__main__':
main()
diff --git a/network/junos/junos_facts.py b/network/junos/junos_facts.py
index c310e78f6ca..383eb90bcf2 100644
--- a/network/junos/junos_facts.py
+++ b/network/junos/junos_facts.py
@@ -16,6 +16,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: junos_facts
@@ -44,12 +48,12 @@
- The C(config_format) argument is used to specify the desired
format of the configuration file. Devices support three
configuration file formats. By default, the configuration
- from the device is returned as text. The other options include
- set and xml. If the xml option is choosen, the configuration file
- is returned as both xml and json.
+ from the device is returned as text. The other option xml.
+ If the xml option is chosen, the configuration file is
+ returned as both xml and json.
required: false
default: text
- choices: ['xml', 'text', 'set']
+ choices: ['xml', 'text']
requirements:
- junos-eznc
notes:
@@ -68,10 +72,10 @@
junos_facts:
config: yes
-- name: collect default set of facts and configuration in set format
+- name: collect default set of facts and configuration in text format
junos_facts:
config: yes
- config_format: set
+ config_format: text
- name: collect default set of facts and configuration in XML and JSON format
junos_facts:
@@ -81,26 +85,30 @@
RETURN = """
ansible_facts:
- descrption: Returns the facts collect from the device
+ description: Returns the facts collect from the device
returned: always
type: dict
"""
+import ansible.module_utils.junos
+
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.junos import xml_to_string, xml_to_json
def main():
""" Main entry point for AnsibleModule
"""
spec = dict(
config=dict(type='bool'),
- config_format=dict(default='text', choices=['xml', 'set', 'text']),
+ config_format=dict(default='text', choices=['xml', 'text']),
transport=dict(default='netconf', choices=['netconf'])
)
- module = get_module(argument_spec=spec,
- supports_check_mode=True)
+ module = NetworkModule(argument_spec=spec,
+ supports_check_mode=True)
result = dict(changed=False)
- facts = module.get_facts()
+ facts = module.connection.get_facts()
if '2RE' in facts:
facts['has_2RE'] = facts['2RE']
@@ -110,19 +118,17 @@ def main():
if module.params['config'] is True:
config_format = module.params['config_format']
- resp_config = module.get_config( config_format=config_format)
+ resp_config = module.config.get_config(config_format=config_format)
- if config_format in ['text', 'set']:
- facts['config'] = resp_config
+ if config_format in ['text']:
+ facts['config'] = resp_config
elif config_format == "xml":
- facts['config'] = xml_to_string(resp_config)
- facts['config_json'] = xml_to_json(resp_config)
+ facts['config'] = xml_to_string(resp_config)
+ facts['config_json'] = xml_to_json(resp_config)
result['ansible_facts'] = facts
module.exit_json(**result)
-from ansible.module_utils.basic import *
-from ansible.module_utils.junos import *
if __name__ == '__main__':
main()
diff --git a/network/junos/junos_netconf.py b/network/junos/junos_netconf.py
index 3db3fa7347c..6f47daa8db0 100644
--- a/network/junos/junos_netconf.py
+++ b/network/junos/junos_netconf.py
@@ -16,6 +16,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: junos_netconf
@@ -30,13 +34,15 @@
resources as defined in RFC 6242.
extends_documentation_fragment: junos
options:
- listens_on:
+ netconf_port:
description:
- This argument specifies the port the netconf service should
listen on for SSH connections. The default port as defined
in RFC 6242 is 830.
- required: true
+ required: false
default: 830
+ aliases: ['listens_on']
+ version_added: "2.2"
state:
description:
- Specifies the state of the M(junos_netconf) resource on
@@ -44,26 +50,47 @@
I(present) the netconf service will be configured. If the
I(state) argument is set to I(absent) the netconf service
will be removed from the configuration.
- required: true
+ required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: ansible
+ password: Ansible
+ transport: cli
+
- name: enable netconf service on port 830
junos_netconf:
listens_on: 830
state: present
+ provider: "{{ cli }}"
- name: disable netconf service
junos_netconf:
state: absent
+ provider: "{{ cli }}"
"""
RETURN = """
+commands:
+ description: Returns the command sent to the remote device
+ returned: when changed is True
+ type: str
+ sample: 'set system services netconf ssh port 830'
"""
import re
+import ansible.module_utils.junos
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.network import NetworkModule, NetworkError
+
def parse_port(config):
match = re.search(r'port (\d+)', config)
if match:
@@ -71,7 +98,7 @@ def parse_port(config):
def get_instance(module):
cmd = 'show configuration system services netconf'
- cfg = module.run_commands(cmd)[0]
+ cfg = module.cli(cmd)[0]
result = dict(state='absent')
if cfg:
result = dict(state='present')
@@ -83,40 +110,43 @@ def main():
"""
argument_spec = dict(
- listens_on=dict(type='int', default=830),
+ netconf_port=dict(type='int', default=830, aliases=['listens_on']),
state=dict(default='present', choices=['present', 'absent']),
transport=dict(default='cli', choices=['cli'])
)
- module = get_module(argument_spec=argument_spec,
- supports_check_mode=True)
+ module = NetworkModule(argument_spec=argument_spec,
+ supports_check_mode=True)
state = module.params['state']
- port = module.params['listens_on']
+ port = module.params['netconf_port']
result = dict(changed=False)
instance = get_instance(module)
- commands = None
if state == 'present' and instance.get('state') == 'absent':
commands = 'set system services netconf ssh port %s' % port
+ elif state == 'present' and port != instance.get('port'):
+ commands = 'set system services netconf ssh port %s' % port
elif state == 'absent' and instance.get('state') == 'present':
commands = 'delete system services netconf'
- elif port != instance.get('port'):
- commands = 'set system services netconf ssh port %s' % port
+ else:
+ commands = None
if commands:
if not module.check_mode:
- comment = 'configuration updated by junos_netconf'
- module.connection.configure(commands, comment=comment)
+ try:
+ comment = 'configuration updated by junos_netconf'
+ module.config(commands, comment=comment)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), **exc.kwargs)
result['changed'] = True
+ result['commands'] = commands
module.exit_json(**result)
-from ansible.module_utils.basic import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.junos import *
if __name__ == '__main__':
main()
diff --git a/network/junos/junos_package.py b/network/junos/junos_package.py
index f11e121b241..c457be8228c 100644
--- a/network/junos/junos_package.py
+++ b/network/junos/junos_package.py
@@ -16,6 +16,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: junos_package
@@ -92,6 +96,9 @@
src: junos-vsrx-12.1X46-D10.2-domestic.tgz
reboot: no
"""
+import ansible.module_utils.junos
+
+from ansible.module_utils.network import NetworkModule
try:
from jnpr.junos.utils.sw import SW
@@ -127,8 +134,8 @@ def main():
transport=dict(default='netconf', choices=['netconf'])
)
- module = get_module(argument_spec=spec,
- supports_check_mode=True)
+ module = NetworkModule(argument_spec=spec,
+ supports_check_mode=True)
if not HAS_SW:
module.fail_json(msg='Missing jnpr.junos.utils.sw module')
@@ -137,8 +144,8 @@ def main():
do_upgrade = module.params['force'] or False
if not module.params['force']:
- has_ver = module.get_facts().get('version')
- wants_ver = module.params['version'] or package_version(module)
+ has_ver = module.connection.get_facts().get('version')
+ wants_ver = module.params['version']
do_upgrade = has_ver != wants_ver
if do_upgrade:
@@ -148,8 +155,6 @@ def main():
module.exit_json(**result)
-from ansible.module_utils.basic import *
-from ansible.module_utils.junos import *
if __name__ == '__main__':
main()
diff --git a/network/netvisor/__init__.py b/network/netvisor/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/network/netvisor/pn_cluster.py b/network/netvisor/pn_cluster.py
new file mode 100644
index 00000000000..de02198e2d9
--- /dev/null
+++ b/network/netvisor/pn_cluster.py
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+""" PN CLI cluster-create/cluster-delete """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+
+import shlex
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: pn_cluster
+author: "Pluribus Networks (@amitsi)"
+version_added: "2.2"
+version: 1.0
+short_description: CLI command to create/delete a cluster.
+description:
+ - Execute cluster-create or cluster-delete command.
+ - A cluster allows two switches to cooperate in high-availability (HA)
+ deployments. The nodes that form the cluster must be members of the same
+ fabric. Clusters are typically used in conjunction with a virtual link
+ aggregation group (VLAG) that allows links physically connected to two
+ separate switches appear as a single trunk to a third device. The third
+ device can be a switch,server, or any Ethernet device.
+options:
+ pn_cliusername:
+ description:
+ - Provide login username if user is not root.
+ required: False
+ pn_clipassword:
+ description:
+ - Provide login password if user is not root.
+ required: False
+ pn_cliswitch:
+ description:
+ - Target switch to run the cli on.
+ required: False
+ state:
+ description:
+ - Specify action to perform. Use 'present' to create cluster and 'absent'
+ to delete cluster.
+ required: true
+ choices: ['present', 'absent']
+ pn_name:
+ description:
+ - Specify the name of the cluster.
+ required: true
+ pn_cluster_node1:
+ description:
+ - Specify the name of the first switch in the cluster.
+ - Required for 'cluster-create'.
+ pn_cluster_node2:
+ description:
+ - Specify the name of the second switch in the cluster.
+ - Required for 'cluster-create'.
+ pn_validate:
+ description:
+ - Validate the inter-switch links and state of switches in the cluster.
+ choices: ['validate', 'no-validate']
+"""
+
+EXAMPLES = """
+- name: create spine cluster
+ pn_cluster:
+ state: 'present'
+ pn_name: 'spine-cluster'
+ pn_cluster_node1: 'spine01'
+ pn_cluster_node2: 'spine02'
+ pn_validate: validate
+ pn_quiet: True
+
+- name: delete spine cluster
+ pn_cluster:
+ state: 'absent'
+ pn_name: 'spine-cluster'
+ pn_quiet: True
+"""
+
+RETURN = """
+command:
+ description: The CLI command run on the target node(s).
+stdout:
+ description: The set of responses from the cluster command.
+ returned: always
+ type: list
+stderr:
+ description: The set of error responses from the cluster command.
+ returned: on error
+ type: list
+changed:
+ description: Indicates whether the CLI caused changes on the target.
+ returned: always
+ type: bool
+"""
+
+NAME_EXISTS = None
+NODE1_EXISTS = None
+NODE2_EXISTS = None
+
+
+def pn_cli(module):
+ """
+ This method is to generate the cli portion to launch the Netvisor cli.
+ It parses the username, password, switch parameters from module.
+ :param module: The Ansible module to fetch username, password and switch
+ :return: returns the cli string for further processing
+ """
+ username = module.params['pn_cliusername']
+ password = module.params['pn_clipassword']
+ cliswitch = module.params['pn_cliswitch']
+
+ if username and password:
+ cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
+ else:
+ cli = '/usr/bin/cli --quiet '
+
+ if cliswitch == 'local':
+ cli += ' switch-local '
+ else:
+ cli += ' switch ' + cliswitch
+ return cli
+
+
+def check_cli(module, cli):
+ """
+ This method checks for idempotency using the cluster-show command.
+ If a cluster with given name exists, return NAME_EXISTS as True else False.
+ If the given cluster-node-1 is already a part of another cluster, return
+ NODE1_EXISTS as True else False.
+ If the given cluster-node-2 is already a part of another cluster, return
+ NODE2_EXISTS as True else False.
+ :param module: The Ansible module to fetch input parameters
+ :param cli: The CLI string
+ :return Global Booleans: NAME_EXISTS, NODE1_EXISTS, NODE2_EXISTS
+ """
+ name = module.params['pn_name']
+ node1 = module.params['pn_cluster_node1']
+ node2 = module.params['pn_cluster_node2']
+
+ show = cli + ' cluster-show format name,cluster-node-1,cluster-node-2 '
+ show = shlex.split(show)
+ out = module.run_command(show)[1]
+
+ out = out.split()
+ # Global flags
+ global NAME_EXISTS, NODE1_EXISTS, NODE2_EXISTS
+
+ if name in out:
+ NAME_EXISTS = True
+ else:
+ NAME_EXISTS = False
+ if node1 in out:
+ NODE1_EXISTS = True
+ else:
+ NODE2_EXISTS = False
+ if node2 in out:
+ NODE2_EXISTS = True
+ else:
+ NODE2_EXISTS = False
+
+
+def run_cli(module, cli):
+ """
+ This method executes the cli command on the target node(s) and returns the
+ output. The module then exits based on the output.
+ :param cli: the complete cli string to be executed on the target node(s).
+ :param module: The Ansible module to fetch command
+ """
+ cliswitch = module.params['pn_cliswitch']
+ state = module.params['state']
+ command = get_command_from_state(state)
+
+ cmd = shlex.split(cli)
+
+ # 'out' contains the output
+ # 'err' contains the error messages
+ result, out, err = module.run_command(cmd)
+
+ print_cli = cli.split(cliswitch)[1]
+
+ # Response in JSON format
+ if result != 0:
+ module.exit_json(
+ command=print_cli,
+ stderr=err.strip(),
+ msg="%s operation failed" % command,
+ changed=False
+ )
+
+ if out:
+ module.exit_json(
+ command=print_cli,
+ stdout=out.strip(),
+ msg="%s operation completed" % command,
+ changed=True
+ )
+
+ else:
+ module.exit_json(
+ command=print_cli,
+ msg="%s operation completed" % command,
+ changed=True
+ )
+
+
+def get_command_from_state(state):
+ """
+ This method gets appropriate command name for the state specified. It
+ returns the command name for the specified state.
+ :param state: The state for which the respective command name is required.
+ """
+ command = None
+ if state == 'present':
+ command = 'cluster-create'
+ if state == 'absent':
+ command = 'cluster-delete'
+ return command
+
+
+def main():
+ """ This section is for arguments parsing """
+ module = AnsibleModule(
+ argument_spec=dict(
+ pn_cliusername=dict(required=False, type='str'),
+ pn_clipassword=dict(required=False, type='str', no_log=True),
+ pn_cliswitch=dict(required=False, type='str', default='local'),
+ state =dict(required=True, type='str',
+ choices=['present', 'absent']),
+ pn_name=dict(required=True, type='str'),
+ pn_cluster_node1=dict(type='str'),
+ pn_cluster_node2=dict(type='str'),
+ pn_validate=dict(type='bool')
+ ),
+ required_if=(
+ ["state", "present",
+ ["pn_name", "pn_cluster_node1", "pn_cluster_node2"]],
+ ["state", "absent", ["pn_name"]]
+ )
+ )
+
+ # Accessing the parameters
+ state = module.params['state']
+ name = module.params['pn_name']
+ cluster_node1 = module.params['pn_cluster_node1']
+ cluster_node2 = module.params['pn_cluster_node2']
+ validate = module.params['pn_validate']
+
+ command = get_command_from_state(state)
+
+ # Building the CLI command string
+ cli = pn_cli(module)
+
+ if command == 'cluster-create':
+
+ check_cli(module, cli)
+
+ if NAME_EXISTS is True:
+ module.exit_json(
+ skipped=True,
+ msg='Cluster with name %s already exists' % name
+ )
+ if NODE1_EXISTS is True:
+ module.exit_json(
+ skipped=True,
+ msg='Node %s already part of a cluster' % cluster_node1
+ )
+ if NODE2_EXISTS is True:
+ module.exit_json(
+ skipped=True,
+ msg='Node %s already part of a cluster' % cluster_node2
+ )
+
+ cli += ' %s name %s ' % (command, name)
+ cli += 'cluster-node-1 %s cluster-node-2 %s ' % (cluster_node1,
+ cluster_node2)
+ if validate is True:
+ cli += ' validate '
+ if validate is False:
+ cli += ' no-validate '
+
+ if command == 'cluster-delete':
+
+ check_cli(module, cli)
+
+ if NAME_EXISTS is False:
+ module.exit_json(
+ skipped=True,
+ msg='Cluster with name %s does not exist' % name
+ )
+ cli += ' %s name %s ' % (command, name)
+
+ run_cli(module, cli)
+
+# AnsibleModule boilerplate
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/network/netvisor/pn_ospf.py b/network/netvisor/pn_ospf.py
new file mode 100644
index 00000000000..7c4cbd1400f
--- /dev/null
+++ b/network/netvisor/pn_ospf.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+""" PN-CLI vrouter-ospf-add/remove """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+import shlex
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: pn_ospf
+author: "Pluribus Networks (@amitsi)"
+version_added: "2.2"
+version: 1.0
+short_description: CLI command to add/remove ospf protocol to a vRouter.
+description:
+ - Execute vrouter-ospf-add, vrouter-ospf-remove command.
+ - This command adds/removes Open Shortest Path First(OSPF) routing
+ protocol to a virtual router(vRouter) service.
+options:
+ pn_cliusername:
+ description:
+ - Provide login username if user is not root.
+ required: False
+ pn_clipassword:
+ description:
+ - Provide login password if user is not root.
+ required: False
+ pn_cliswitch:
+ description:
+ - Target switch to run the CLI on.
+ required: False
+ state:
+ description:
+ - Assert the state of the ospf. Use 'present' to add ospf
+ and 'absent' to remove ospf.
+ required: True
+ default: present
+ choices: ['present', 'absent']
+ pn_vrouter_name:
+ description:
+ - Specify the name of the vRouter.
+ required: True
+ pn_network_ip:
+ description:
+ - Specify the network IP (IPv4 or IPv6) address.
+ required: True
+ pn_ospf_area:
+ description:
+ - Stub area number for the configuration. Required for vrouter-ospf-add.
+"""
+
+EXAMPLES = """
+- name: "Add OSPF to vrouter"
+ pn_ospf:
+ state: present
+ pn_vrouter_name: name-string
+ pn_network_ip: 192.168.11.2/24
+ pn_ospf_area: 1.0.0.0
+
+- name: "Remove OSPF from vrouter"
+ pn_ospf:
+ state: absent
+ pn_vrouter_name: name-string
+"""
+
+RETURN = """
+command:
+ description: The CLI command run on the target node(s).
+stdout:
+ description: The set of responses from the ospf command.
+ returned: always
+ type: list
+stderr:
+ description: The set of error responses from the ospf command.
+ returned: on error
+ type: list
+changed:
+ description: Indicates whether the CLI caused changes on the target.
+ returned: always
+ type: bool
+"""
+
+
+VROUTER_EXISTS = None
+NETWORK_EXISTS = None
+
+
def pn_cli(module):
    """
    Build the base Netvisor CLI invocation string.

    Pulls the optional login credentials and the target switch out of the
    module parameters and assembles the '/usr/bin/cli' prefix that every
    command in this module is appended to.
    :param module: The Ansible module to fetch username, password and switch
    :return: the cli string for further processing
    """
    params = module.params
    user = params['pn_cliusername']
    pwd = params['pn_clipassword']
    target = params['pn_cliswitch']

    # Credentials are injected only when both pieces are present.
    if user and pwd:
        base = '/usr/bin/cli --quiet --user %s:%s ' % (user, pwd)
    else:
        base = '/usr/bin/cli --quiet '

    # 'local' targets the switch we are logged into; anything else is named.
    suffix = ' switch-local ' if target == 'local' else ' switch ' + target
    return base + suffix
+
+
def check_cli(module, cli):
    """
    Probe the target node so the module can stay idempotent.

    Runs 'vrouter-show' to learn whether the requested vRouter exists and
    'vrouter-ospf-show' to learn whether the OSPF network is already
    configured on it. Results are published through the module-level flags.

    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    :return Global Booleans: VROUTER_EXISTS, NETWORK_EXISTS
    """
    global VROUTER_EXISTS, NETWORK_EXISTS

    vrouter = module.params['pn_vrouter_name']
    network = module.params['pn_network_ip']

    # Does the vRouter exist anywhere in the fabric?
    cmd = shlex.split(cli + ' vrouter-show format name no-show-headers ')
    VROUTER_EXISTS = vrouter in module.run_command(cmd)[1].split()

    # Is this network already attached to that vRouter's OSPF config?
    cmd = shlex.split(cli + ' vrouter-ospf-show vrouter-name %s ' % vrouter
                      + 'format network no-show-headers')
    NETWORK_EXISTS = network in module.run_command(cmd)[1].split()
+
+
def run_cli(module, cli):
    """
    Execute the assembled CLI command on the target node(s) and exit.

    The module always terminates here: with a failure payload when the CLI
    returns a non-zero status, otherwise with the command output (if any)
    and changed=True.
    :param cli: the complete cli string to be executed on the target node(s).
    :param module: The Ansible module to fetch command
    """
    switch = module.params['pn_cliswitch']
    action = get_command_from_state(module.params['state'])

    rc, out, err = module.run_command(shlex.split(cli))

    # Strip the credential/switch prefix before echoing the command back.
    shown = cli.split(switch)[1]

    # Response in JSON format
    if rc != 0:
        module.exit_json(
            command=shown,
            stderr=err.strip(),
            msg="%s operation failed" % action,
            changed=False
        )

    if out:
        module.exit_json(
            command=shown,
            stdout=out.strip(),
            msg="%s operation completed" % action,
            changed=True
        )
    else:
        module.exit_json(
            command=shown,
            msg="%s operation completed" % action,
            changed=True
        )
+
+
def get_command_from_state(state):
    """
    Map the requested module state to the matching Netvisor command.
    :param state: The state for which the respective command name is required.
    :return: the command name, or None for an unknown state.
    """
    state_to_command = {
        'present': 'vrouter-ospf-add',
        'absent': 'vrouter-ospf-remove',
    }
    return state_to_command.get(state)
+
+
def main():
    """
    Entry point: parse arguments, enforce idempotency and run the CLI.

    Builds vrouter-ospf-add/vrouter-ospf-remove commands and skips early
    when the requested configuration is already in the desired state.
    """
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=False, type='str'),
            pn_clipassword=dict(required=False, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(type='str', default='present',
                       choices=['present', 'absent']),
            pn_vrouter_name=dict(required=True, type='str'),
            pn_network_ip=dict(required=True, type='str'),
            pn_ospf_area=dict(type='str')
        ),
        required_if=(
            ['state', 'present',
             ['pn_network_ip', 'pn_ospf_area']],
            ['state', 'absent', ['pn_network_ip']]
        )
    )

    # Accessing the arguments
    state = module.params['state']
    vrouter_name = module.params['pn_vrouter_name']
    network_ip = module.params['pn_network_ip']
    ospf_area = module.params['pn_ospf_area']

    command = get_command_from_state(state)

    # Building the CLI command string
    cli = pn_cli(module)
    # Populate the VROUTER_EXISTS / NETWORK_EXISTS idempotency flags.
    check_cli(module, cli)

    if state == 'present':
        if VROUTER_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='vRouter %s does not exist' % vrouter_name
            )
        if NETWORK_EXISTS is True:
            # Already configured: nothing to do.
            module.exit_json(
                skipped=True,
                msg=('OSPF with network ip %s already exists on %s'
                     % (network_ip, vrouter_name))
            )
        cli += (' %s vrouter-name %s network %s ospf-area %s'
                % (command, vrouter_name, network_ip, ospf_area))

    if state == 'absent':
        if VROUTER_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='vRouter %s does not exist' % vrouter_name
            )
        if NETWORK_EXISTS is False:
            # BUGFIX: the original message wrongly said "already exists"
            # on the removal path; nothing to remove here.
            module.exit_json(
                skipped=True,
                msg=('OSPF with network ip %s does not exist on %s'
                     % (network_ip, vrouter_name))
            )
        cli += (' %s vrouter-name %s network %s'
                % (command, vrouter_name, network_ip))

    run_cli(module, cli)
+# AnsibleModule boilerplate
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/network/netvisor/pn_ospfarea.py b/network/netvisor/pn_ospfarea.py
new file mode 100644
index 00000000000..d34b145d28a
--- /dev/null
+++ b/network/netvisor/pn_ospfarea.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+""" PN-CLI vrouter-ospf-add/remove """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+
+import shlex
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: pn_ospfarea
+author: "Pluribus Networks (@amitsi)"
+version_added: "2.2"
+version: 1.0
+short_description: CLI command to add/remove ospf area to/from a vrouter.
+description:
+ - Execute vrouter-ospf-add, vrouter-ospf-remove command.
+ - This command adds/removes Open Shortest Path First(OSPF) area to/from
+ a virtual router(vRouter) service.
+options:
+ pn_cliusername:
+ description:
+ - Login username.
+ required: true
+ pn_clipassword:
+ description:
+ - Login password.
+ required: true
+ pn_cliswitch:
+ description:
+ - Target switch(es) to run the CLI on.
+ required: False
+ state:
+ description:
+ - State the action to perform. Use 'present' to add ospf-area, 'absent'
+ to remove ospf-area and 'update' to modify ospf-area.
+ required: true
+ choices: ['present', 'absent', 'update']
+ pn_vrouter_name:
+ description:
+ - Specify the name of the vRouter.
+ required: true
+ pn_ospf_area:
+ description:
+ - Specify the OSPF area number.
+ required: true
+ pn_stub_type:
+ description:
+ - Specify the OSPF stub type.
+ choices: ['none', 'stub', 'stub-no-summary', 'nssa', 'nssa-no-summary']
+ pn_prefix_listin:
+ description:
+ - OSPF prefix list for filtering incoming packets.
+ pn_prefix_listout:
+ description:
+ - OSPF prefix list for filtering outgoing packets.
+ pn_quiet:
+ description:
+ - Enable/disable system information.
+ required: false
+ default: true
+"""
+
+EXAMPLES = """
+- name: "Add OSPF area to vrouter"
+ pn_ospfarea:
+ state: present
+ pn_cliusername: admin
+ pn_clipassword: admin
+ pn_ospf_area: 1.0.0.0
+ pn_stub_type: stub
+
+- name: "Remove OSPF from vrouter"
+ pn_ospf:
+ state: absent
+ pn_cliusername: admin
+ pn_clipassword: admin
+ pn_vrouter_name: name-string
+ pn_ospf_area: 1.0.0.0
+"""
+
+RETURN = """
+command:
+ description: The CLI command run on the target node(s).
+stdout:
+ description: The set of responses from the ospf command.
+ returned: always
+ type: list
+stderr:
+ description: The set of error responses from the ospf command.
+ returned: on error
+ type: list
+changed:
+ description: Indicates whether the CLI caused changes on the target.
+ returned: always
+ type: bool
+"""
+
+
def get_command_from_state(state):
    """
    Translate the module state into the matching ospf-area command name.
    :param state: The state for which the respective command name is required.
    :return: the command name, or None for an unknown state.
    """
    return {
        'present': 'vrouter-ospf-area-add',
        'absent': 'vrouter-ospf-area-remove',
        'update': 'vrouter-ospf-area-modify',
    }.get(state)
+
+
def main():
    """
    Entry point: parse arguments, build the ospf-area CLI string and run it.

    Always executes the CLI (this module has no idempotency probe) and
    exits with the command output in JSON form.
    """
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=True, type='str'),
            pn_clipassword=dict(required=True, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str'),
            # FIX: normalized kwarg spacing ('state =dict' -> 'state=dict').
            state=dict(required=True, type='str',
                       choices=['present', 'absent', 'update']),
            pn_vrouter_name=dict(required=True, type='str'),
            pn_ospf_area=dict(required=True, type='str'),
            pn_stub_type=dict(type='str', choices=['none', 'stub', 'nssa',
                                                   'stub-no-summary',
                                                   'nssa-no-summary']),
            pn_prefix_listin=dict(type='str'),
            pn_prefix_listout=dict(type='str'),
            # BUGFIX: a bool parameter should default to the boolean True,
            # not the string 'True'.
            pn_quiet=dict(type='bool', default=True)
        )
    )

    # Accessing the arguments
    cliusername = module.params['pn_cliusername']
    clipassword = module.params['pn_clipassword']
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    vrouter_name = module.params['pn_vrouter_name']
    ospf_area = module.params['pn_ospf_area']
    stub_type = module.params['pn_stub_type']
    prefix_listin = module.params['pn_prefix_listin']
    prefix_listout = module.params['pn_prefix_listout']
    quiet = module.params['pn_quiet']

    command = get_command_from_state(state)

    # Building the CLI command string
    cli = '/usr/bin/cli'

    if quiet is True:
        cli += ' --quiet '

    cli += ' --user %s:%s ' % (cliusername, clipassword)

    if cliswitch:
        if cliswitch == 'local':
            cli += ' switch-local '
        else:
            cli += ' switch ' + cliswitch

    cli += ' %s vrouter-name %s area %s ' % (command, vrouter_name, ospf_area)

    # Optional modifiers are appended only when supplied.
    if stub_type:
        cli += ' stub-type ' + stub_type

    if prefix_listin:
        cli += ' prefix-list-in ' + prefix_listin

    if prefix_listout:
        cli += ' prefix-list-out ' + prefix_listout

    # Run the CLI command
    ospfcommand = shlex.split(cli)

    # 'out' contains the output
    # 'err' contains the error messages
    result, out, err = module.run_command(ospfcommand)

    # Response in JSON format
    if result != 0:
        module.exit_json(
            command=cli,
            stderr=err.rstrip("\r\n"),
            changed=False
        )
    else:
        module.exit_json(
            command=cli,
            stdout=out.rstrip("\r\n"),
            changed=True
        )
+
+# AnsibleModule boilerplate
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/network/netvisor/pn_show.py b/network/netvisor/pn_show.py
new file mode 100644
index 00000000000..ff22667a434
--- /dev/null
+++ b/network/netvisor/pn_show.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+""" PN CLI show commands """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+
+import shlex
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: pn_show
+author: "Pluribus Networks (@amitsi)"
+version_added: "2.2"
+version: 1.0
+short_description: Run show commands on nvOS device.
+description:
+ - Execute show command in the nodes and returns the results
+ read from the device.
+options:
+ pn_cliusername:
+ description:
+ - Provide login username if user is not root.
+ required: False
+ pn_clipassword:
+ description:
+ - Provide login password if user is not root.
+ required: False
+ pn_cliswitch:
+ description:
+ - Target switch(es) to run the cli on.
+ required: False
+ pn_command:
+ description:
+ - The C(pn_command) takes a CLI show command as value.
+ required: true
+ pn_parameters:
+ description:
+ - Display output using a specific parameter. Use 'all' to display possible
+ output. List of comma separated parameters.
+ pn_options:
+ description:
+ - Specify formatting options.
+"""
+
+EXAMPLES = """
+- name: run the vlan-show command
+ pn_show:
+ pn_command: 'vlan-show'
+ pn_parameters: id,scope,ports
+ pn_options: 'layout vertical'
+
+- name: run the vlag-show command
+ pn_show:
+ pn_command: 'vlag-show'
+ pn_parameters: 'id,name,cluster,mode'
+ pn_options: 'no-show-headers'
+
+- name: run the cluster-show command
+ pn_show:
+ pn_command: 'cluster-show'
+"""
+
+RETURN = """
+command:
+ description: The CLI command run on the target node(s).
+stdout:
+ description: The set of responses from the show command.
+ returned: always
+ type: list
+stderr:
+ description: The set of error responses from the show command.
+ returned: on error
+ type: list
+changed:
+ description: Indicates whether the CLI caused any change on the target.
+ returned: always(False)
+ type: bool
+"""
+
+
def pn_cli(module):
    """
    Assemble the base Netvisor CLI string from the module parameters.

    Credentials are added when both username and password are supplied;
    a switch clause is appended only when a target switch was requested.
    :param module: The Ansible module to fetch username, password and switch
    :return: the cli string for further processing
    """
    user = module.params['pn_cliusername']
    pwd = module.params['pn_clipassword']
    target = module.params['pn_cliswitch']

    if user and pwd:
        cli = '/usr/bin/cli --quiet --user %s:%s ' % (user, pwd)
    else:
        cli = '/usr/bin/cli --quiet '

    # pn_cliswitch is optional in this module: no clause when unset.
    if target:
        cli += ' switch-local ' if target == 'local' else ' switch ' + target
    return cli
+
+
def run_cli(module, cli):
    """
    Execute the show command on the target node(s) and exit the module.

    Exits with changed=False in every case (show commands never mutate
    state): a failure payload on non-zero CLI status, the command output
    when there is any, and a "nothing to display" message otherwise.
    :param cli: the complete cli string to be executed on the target node(s).
    :param module: The Ansible module to fetch command
    """
    cliswitch = module.params['pn_cliswitch']
    command = module.params['pn_command']
    cmd = shlex.split(cli)

    # 'out' contains the output
    # 'err' contains the error messages
    result, out, err = module.run_command(cmd)

    # BUGFIX: pn_cliswitch is optional in this module. str.split(None)
    # splits on whitespace, so the reported command degenerated to a
    # meaningless token when no switch was given; report the full command
    # instead in that case.
    print_cli = cli.split(cliswitch)[1] if cliswitch else cli

    # Response in JSON format
    if result != 0:
        module.exit_json(
            command=print_cli,
            msg='%s: ' % command,
            stderr=err.strip(),
            changed=False
        )

    if out:
        module.exit_json(
            command=print_cli,
            msg='%s: ' % command,
            stdout=out.strip(),
            changed=False
        )

    else:
        # BUGFIX: report the same stripped command as the other branches
        # instead of the raw cli string (which may contain credentials).
        module.exit_json(
            command=print_cli,
            msg='%s: Nothing to display!!!' % command,
            changed=False
        )
+
+
def main():
    """
    Entry point: parse the show-command arguments, build the CLI, run it.
    """
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=True, type='str'),
            pn_clipassword=dict(required=True, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str'),
            pn_command=dict(required=True, type='str'),
            pn_parameters=dict(default='all', type='str'),
            pn_options=dict(type='str')
        )
    )

    # Pull out the pieces of the show command.
    show_command = module.params['pn_command']
    show_parameters = module.params['pn_parameters']
    show_options = module.params['pn_options']

    # Base CLI prefix (credentials and optional switch clause),
    # then the show command itself and any formatting options.
    cli = pn_cli(module)
    cli += ' %s format %s ' % (show_command, show_parameters)
    if show_options:
        cli += show_options

    run_cli(module, cli)
+
+# AnsibleModule boilerplate
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/network/netvisor/pn_trunk.py b/network/netvisor/pn_trunk.py
new file mode 100644
index 00000000000..da3c568f203
--- /dev/null
+++ b/network/netvisor/pn_trunk.py
@@ -0,0 +1,450 @@
+#!/usr/bin/python
+""" PN CLI trunk-create/trunk-delete/trunk-modify """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+
+import shlex
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: pn_trunk
+author: "Pluribus Networks (@amitsi)"
+version_added: "2.2"
+version: 1.0
+short_description: CLI command to create/delete/modify a trunk.
+description:
+ - Execute trunk-create or trunk-delete command.
+ - Trunks can be used to aggregate network links at Layer 2 on the local
+ switch. Use this command to create a new trunk.
+options:
+ pn_cliusername:
+ description:
+ - Provide login username if user is not root.
+ required: False
+ pn_clipassword:
+ description:
+ - Provide login password if user is not root.
+ required: False
+ pn_cliswitch:
+ description:
+ - Target switch(es) to run the cli on.
+ required: False
+ state:
+ description:
+ - State the action to perform. Use 'present' to create trunk,
+ 'absent' to delete trunk and 'update' to modify trunk.
+ required: True
+ choices: ['present', 'absent', 'update']
+ pn_name:
+ description:
+ - Specify the name for the trunk configuration.
+ required: true
+ pn_ports:
+ description:
+ - Specify the port number(s) for the link(s) to aggregate into the trunk.
+ - Required for trunk-create.
+ pn_speed:
+ description:
+ - Specify the port speed or disable the port.
+ choices: ['disable', '10m', '100m', '1g', '2.5g', '10g', '40g']
+ pn_egress_rate_limit:
+ description:
+ - Specify an egress port data rate limit for the configuration.
+ pn_jumbo:
+ description:
+ - Specify if the port can receive jumbo frames.
+ pn_lacp_mode:
+ description:
+ - Specify the LACP mode for the configuration.
+ choices: ['off', 'passive', 'active']
+ pn_lacp_priority:
+ description:
+ - Specify the LACP priority. This is a number between 1 and 65535 with a
+ default value of 32768.
+ pn_lacp_timeout:
+ description:
+ - Specify the LACP time out as slow (30 seconds) or fast (4seconds).
+ The default value is slow.
+ choices: ['slow', 'fast']
+ pn_lacp_fallback:
+ description:
+ - Specify the LACP fallback mode as bundles or individual.
+ choices: ['bundle', 'individual']
+ pn_lacp_fallback_timeout:
+ description:
+ - Specify the LACP fallback timeout in seconds. The range is between 30
+ and 60 seconds with a default value of 50 seconds.
+ pn_edge_switch:
+ description:
+ - Specify if the switch is an edge switch.
+ pn_pause:
+ description:
+ - Specify if pause frames are sent.
+ pn_description:
+ description:
+ - Specify a description for the trunk configuration.
+ pn_loopback:
+ description:
+ - Specify loopback if you want to use loopback.
+ pn_mirror_receive:
+ description:
+ - Specify if the configuration receives mirrored traffic.
+ pn_unknown_ucast_level:
+ description:
+      - Specify an unknown unicast level in percent. The default value is 100%.
+ pn_unknown_mcast_level:
+ description:
+      - Specify an unknown multicast level in percent. The default value is 100%.
+ pn_broadcast_level:
+ description:
+ - Specify a broadcast level in percent. The default value is 100%.
+ pn_port_macaddr:
+ description:
+ - Specify the MAC address of the port.
+ pn_loopvlans:
+ description:
+ - Specify a list of looping vlans.
+ pn_routing:
+ description:
+ - Specify if the port participates in routing on the network.
+ pn_host:
+ description:
+ - Host facing port control setting.
+"""
+
+EXAMPLES = """
+- name: create trunk
+ pn_trunk:
+ state: 'present'
+ pn_name: 'spine-to-leaf'
+ pn_ports: '11,12,13,14'
+
+- name: delete trunk
+ pn_trunk:
+ state: 'absent'
+ pn_name: 'spine-to-leaf'
+"""
+
+RETURN = """
+command:
+ description: The CLI command run on the target node(s).
+stdout:
+ description: The set of responses from the trunk command.
+ returned: always
+ type: list
+stderr:
+ description: The set of error responses from the trunk command.
+ returned: on error
+ type: list
+changed:
+ description: Indicates whether the CLI caused changes on the target.
+ returned: always
+ type: bool
+"""
+TRUNK_EXISTS = None
+
+
def pn_cli(module):
    """
    Build the base Netvisor CLI string from the module parameters.
    :param module: The Ansible module to fetch username, password and switch
    :return: the cli string for further processing
    """
    creds = (module.params['pn_cliusername'], module.params['pn_clipassword'])
    target = module.params['pn_cliswitch']

    # Credentials are only injected when both pieces are present.
    if all(creds):
        cli = '/usr/bin/cli --quiet --user %s:%s ' % creds
    else:
        cli = '/usr/bin/cli --quiet '

    # pn_cliswitch defaults to 'local', so a switch clause is always added.
    if target == 'local':
        cli += ' switch-local '
    else:
        cli += ' switch ' + target
    return cli
+
+
def check_cli(module, cli):
    """
    Idempotency probe: run trunk-show and record whether a trunk with the
    requested name already exists in the module-level TRUNK_EXISTS flag.
    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    :return Global Booleans: TRUNK_EXISTS
    """
    global TRUNK_EXISTS

    trunk_name = module.params['pn_name']

    show_cmd = shlex.split(
        cli + ' trunk-show format switch,name no-show-headers')
    out = module.run_command(show_cmd)[1]

    # The output is a whitespace-separated list of switch/name tokens.
    TRUNK_EXISTS = trunk_name in out.split()
+
+
def run_cli(module, cli):
    """
    Run the assembled trunk command on the target node(s) and exit.

    Failure (non-zero CLI status) exits with changed=False and the stderr
    text; success exits with changed=True and any stdout produced.
    :param cli: the complete cli string to be executed on the target node(s).
    :param module: The Ansible module to fetch command
    """
    switch = module.params['pn_cliswitch']
    op = get_command_from_state(module.params['state'])

    # 'out' contains the output
    # 'err' contains the error messages
    rc, out, err = module.run_command(shlex.split(cli))

    # Hide the credential/switch prefix when echoing the command back.
    reported = cli.split(switch)[1]

    # Response in JSON format
    if rc != 0:
        module.exit_json(
            command=reported,
            stderr=err.strip(),
            msg="%s operation failed" % op,
            changed=False
        )

    if out:
        module.exit_json(
            command=reported,
            stdout=out.strip(),
            msg="%s operation completed" % op,
            changed=True
        )
    else:
        module.exit_json(
            command=reported,
            msg="%s operation completed" % op,
            changed=True
        )
+
+
def get_command_from_state(state):
    """
    Map the requested module state to the matching trunk command name.
    :param state: The state for which the respective command name is required.
    :return: the command name, or None for an unknown state.
    """
    commands = {
        'present': 'trunk-create',
        'absent': 'trunk-delete',
        'update': 'trunk-modify',
    }
    return commands.get(state)
+
+
def main():
    """
    Entry point: parse arguments, enforce idempotency and run the trunk CLI.

    Builds trunk-create/trunk-delete/trunk-modify commands; create and
    delete are skipped early when the trunk is already in the desired state.
    """
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=False, type='str'),
            pn_clipassword=dict(required=False, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
                       choices=['present', 'absent', 'update']),
            pn_name=dict(required=True, type='str'),
            pn_ports=dict(type='str'),
            pn_speed=dict(type='str',
                          choices=['disable', '10m', '100m', '1g', '2.5g',
                                   '10g', '40g']),
            pn_egress_rate_limit=dict(type='str'),
            pn_jumbo=dict(type='bool'),
            pn_lacp_mode=dict(type='str', choices=[
                'off', 'passive', 'active']),
            pn_lacp_priority=dict(type='int'),
            pn_lacp_timeout=dict(type='str'),
            pn_lacp_fallback=dict(type='str', choices=[
                'bundle', 'individual']),
            pn_lacp_fallback_timeout=dict(type='str'),
            pn_edge_switch=dict(type='bool'),
            pn_pause=dict(type='bool'),
            pn_description=dict(type='str'),
            pn_loopback=dict(type='bool'),
            pn_mirror_receive=dict(type='bool'),
            pn_unknown_ucast_level=dict(type='str'),
            pn_unknown_mcast_level=dict(type='str'),
            pn_broadcast_level=dict(type='str'),
            pn_port_macaddr=dict(type='str'),
            pn_loopvlans=dict(type='str'),
            pn_routing=dict(type='bool'),
            pn_host=dict(type='bool')
        ),
        required_if=(
            ["state", "present", ["pn_name", "pn_ports"]],
            ["state", "absent", ["pn_name"]],
            ["state", "update", ["pn_name"]]
        )
    )

    # Accessing the arguments
    state = module.params['state']
    name = module.params['pn_name']
    ports = module.params['pn_ports']
    speed = module.params['pn_speed']
    egress_rate_limit = module.params['pn_egress_rate_limit']
    jumbo = module.params['pn_jumbo']
    lacp_mode = module.params['pn_lacp_mode']
    lacp_priority = module.params['pn_lacp_priority']
    lacp_timeout = module.params['pn_lacp_timeout']
    lacp_fallback = module.params['pn_lacp_fallback']
    lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout']
    edge_switch = module.params['pn_edge_switch']
    pause = module.params['pn_pause']
    description = module.params['pn_description']
    loopback = module.params['pn_loopback']
    mirror_receive = module.params['pn_mirror_receive']
    unknown_ucast_level = module.params['pn_unknown_ucast_level']
    unknown_mcast_level = module.params['pn_unknown_mcast_level']
    broadcast_level = module.params['pn_broadcast_level']
    port_macaddr = module.params['pn_port_macaddr']
    loopvlans = module.params['pn_loopvlans']
    routing = module.params['pn_routing']
    host = module.params['pn_host']

    command = get_command_from_state(state)

    # Building the CLI command string
    cli = pn_cli(module)

    if command == 'trunk-delete':

        check_cli(module, cli)
        if TRUNK_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='Trunk with name %s does not exist' % name
            )
        cli += ' %s name %s ' % (command, name)

    else:
        if command == 'trunk-create':
            check_cli(module, cli)
            if TRUNK_EXISTS is True:
                module.exit_json(
                    skipped=True,
                    msg='Trunk with name %s already exists' % name
                )
        cli += ' %s name %s ' % (command, name)

        # Appending options. The tri-state bools are tested with
        # 'is True' / 'is False' so that None (unset) appends nothing.
        if ports:
            cli += ' ports ' + ports

        if speed:
            cli += ' speed ' + speed

        if egress_rate_limit:
            cli += ' egress-rate-limit ' + egress_rate_limit

        if jumbo is True:
            cli += ' jumbo '
        if jumbo is False:
            cli += ' no-jumbo '

        if lacp_mode:
            cli += ' lacp-mode ' + lacp_mode

        if lacp_priority:
            # BUGFIX: pn_lacp_priority is type='int'; concatenating it to a
            # str raised TypeError. Convert explicitly.
            cli += ' lacp-priority ' + str(lacp_priority)

        if lacp_timeout:
            cli += ' lacp-timeout ' + lacp_timeout

        if lacp_fallback:
            cli += ' lacp-fallback ' + lacp_fallback

        if lacp_fallback_timeout:
            cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout

        if edge_switch is True:
            cli += ' edge-switch '
        if edge_switch is False:
            cli += ' no-edge-switch '

        if pause is True:
            cli += ' pause '
        if pause is False:
            cli += ' no-pause '

        if description:
            cli += ' description ' + description

        if loopback is True:
            cli += ' loopback '
        if loopback is False:
            cli += ' no-loopback '

        if mirror_receive is True:
            cli += ' mirror-receive-only '
        if mirror_receive is False:
            cli += ' no-mirror-receive-only '

        if unknown_ucast_level:
            cli += ' unknown-ucast-level ' + unknown_ucast_level

        if unknown_mcast_level:
            cli += ' unknown-mcast-level ' + unknown_mcast_level

        if broadcast_level:
            cli += ' broadcast-level ' + broadcast_level

        if port_macaddr:
            cli += ' port-mac-address ' + port_macaddr

        if loopvlans:
            cli += ' loopvlans ' + loopvlans

        if routing is True:
            cli += ' routing '
        if routing is False:
            cli += ' no-routing '

        if host is True:
            cli += ' host-enable '
        if host is False:
            cli += ' host-disable '

    run_cli(module, cli)
+
+# Ansible boiler-plate
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/network/netvisor/pn_vlag.py b/network/netvisor/pn_vlag.py
new file mode 100644
index 00000000000..66b9ce05960
--- /dev/null
+++ b/network/netvisor/pn_vlag.py
@@ -0,0 +1,346 @@
+#!/usr/bin/python
+""" PN CLI vlag-create/vlag-delete/vlag-modify """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import shlex
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: pn_vlag
+author: "Pluribus Networks (@amitsi)"
+version_added: "2.2"
+version: 1.0
+short_description: CLI command to create/delete/modify vlag.
+description:
+ - Execute vlag-create/vlag-delete/vlag-modify command.
+ - A virtual link aggregation group (VLAG) allows links that are physically
+ connected to two different Pluribus Networks devices to appear as a single
+ trunk to a third device. The third device can be a switch, server, or any
+ Ethernet device. A VLAG can provide Layer 2 multipathing, which allows you
+ to create redundancy by increasing bandwidth, enabling multiple parallel
+    paths between nodes and load balancing traffic where alternative paths exist.
+options:
+ pn_cliusername:
+ description:
+ - Provide login username if user is not root.
+ required: False
+ pn_clipassword:
+ description:
+ - Provide login password if user is not root.
+ required: False
+ pn_cliswitch:
+ description:
+ - Target switch(es) to run this command on.
+ state:
+ description:
+ - State the action to perform. Use 'present' to create vlag,
+ 'absent' to delete vlag and 'update' to modify vlag.
+ required: True
+ choices: ['present', 'absent', 'update']
+ pn_name:
+ description:
+ - The C(pn_name) takes a valid name for vlag configuration.
+ required: true
+ pn_port:
+ description:
+ - Specify the local VLAG port.
+ - Required for vlag-create.
+ pn_peer_port:
+ description:
+ - Specify the peer VLAG port.
+ - Required for vlag-create.
+ pn_mode:
+ description:
+ - Specify the mode for the VLAG. Active-standby indicates one side is
+ active and the other side is in standby mode. Active-active indicates
+ that both sides of the vlag are up by default.
+ choices: ['active-active', 'active-standby']
+ pn_peer_switch:
+ description:
+ - Specify the fabric-name of the peer switch.
+ pn_failover_action:
+ description:
+ - Specify the failover action as move or ignore.
+ choices: ['move', 'ignore']
+ pn_lacp_mode:
+ description:
+ - Specify the LACP mode.
+ choices: ['off', 'passive', 'active']
+ pn_lacp_timeout:
+ description:
+ - Specify the LACP timeout as slow(30 seconds) or fast(4 seconds).
+ choices: ['slow', 'fast']
+ pn_lacp_fallback:
+ description:
+ - Specify the LACP fallback mode as bundles or individual.
+      choices: ['bundled', 'individual']
+ pn_lacp_fallback_timeout:
+ description:
+ - Specify the LACP fallback timeout in seconds. The range is between 30
+ and 60 seconds with a default value of 50 seconds.
+"""
+
+EXAMPLES = """
+- name: create a VLAG
+ pn_vlag:
+ state: 'present'
+ pn_name: spine-to-leaf
+ pn_port: 'spine01-to-leaf'
+ pn_peer_port: 'spine02-to-leaf'
+ pn_peer_switch: spine02
+ pn_mode: 'active-active'
+
+- name: delete VLAGs
+ pn_vlag:
+ state: 'absent'
+ pn_name: spine-to-leaf
+"""
+
+RETURN = """
+command:
+ description: The CLI command run on the target node(s).
+stdout:
+ description: The set of responses from the vlag command.
+ returned: always
+ type: list
+stderr:
+ description: The set of error responses from the vlag command.
+ returned: on error
+ type: list
+changed:
+ description: Indicates whether the CLI caused changes on the target.
+ returned: always
+ type: bool
+"""
+
+VLAG_EXISTS = None
+
+
+def pn_cli(module):
+    """
+    Build the base Netvisor CLI invocation string.
+    Credentials are embedded only when both username and password are
+    supplied; otherwise the root CLI is invoked as-is.
+    :param module: The Ansible module holding pn_cliusername,
+                   pn_clipassword and pn_cliswitch parameters.
+    :return: The cli prefix string to which a command can be appended.
+    """
+    user = module.params['pn_cliusername']
+    pwd = module.params['pn_clipassword']
+    switch = module.params['pn_cliswitch']
+
+    if user and pwd:
+        cli = '/usr/bin/cli --quiet --user %s:%s ' % (user, pwd)
+    else:
+        cli = '/usr/bin/cli --quiet '
+
+    # 'local' targets the switch we are logged into; any other value is
+    # treated as a named switch to direct the command to.
+    cli += ' switch-local ' if switch == 'local' else ' switch ' + switch
+    return cli
+
+
+def check_cli(module, cli):
+    """
+    This method checks for idempotency using the vlag-show command.
+    If a vlag with given vlag exists, return VLAG_EXISTS as True else False.
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    :return Global Booleans: VLAG_EXISTS (set as a module-level side effect)
+    """
+    name = module.params['pn_name']
+
+    show = cli + ' vlag-show format name no-show-headers'
+    show = shlex.split(show)
+    # run_command returns (rc, stdout, stderr); only stdout is needed here.
+    out = module.run_command(show)[1]
+
+    # Whitespace-split so the membership test below matches whole vlag
+    # names rather than substrings.
+    out = out.split()
+    # Global flags
+    global VLAG_EXISTS
+    if name in out:
+        VLAG_EXISTS = True
+    else:
+        VLAG_EXISTS = False
+
+
+def run_cli(module, cli):
+    """
+    This method executes the cli command on the target node(s) and returns the
+    output. The module then exits based on the output (exit_json is called on
+    every path, so this function does not return to the caller).
+    :param cli: the complete cli string to be executed on the target node(s).
+    :param module: The Ansible module to fetch command
+    """
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    command = get_command_from_state(state)
+
+    cmd = shlex.split(cli)
+
+    # 'out' contains the output
+    # 'err' contains the error messages
+    result, out, err = module.run_command(cmd)
+
+    # Report only the text after the switch name so credentials embedded in
+    # the cli prefix are not echoed back to the user.
+    # NOTE(review): assumes cliswitch occurs exactly once in the cli string;
+    # a username/password containing the switch name would break this.
+    print_cli = cli.split(cliswitch)[1]
+
+    # Response in JSON format
+    if result != 0:
+        # Non-zero exit code from the CLI: report failure, nothing changed.
+        module.exit_json(
+            command=print_cli,
+            stderr=err.strip(),
+            msg="%s operation failed" % command,
+            changed=False
+        )
+
+    if out:
+        module.exit_json(
+            command=print_cli,
+            stdout=out.strip(),
+            msg="%s operation completed" % command,
+            changed=True
+        )
+
+    else:
+        # Success with no output still counts as a change.
+        module.exit_json(
+            command=print_cli,
+            msg="%s operation completed" % command,
+            changed=True
+        )
+
+
+def get_command_from_state(state):
+    """
+    Map an Ansible state to the matching Netvisor vlag command.
+    :param state: One of 'present', 'absent' or 'update'.
+    :return: The corresponding CLI command name, or None if unrecognised.
+    """
+    return {
+        'present': 'vlag-create',
+        'absent': 'vlag-delete',
+        'update': 'vlag-modify',
+    }.get(state)
+
+
+def main():
+    """ Parse arguments and create/delete/modify a VLAG accordingly. """
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliusername=dict(required=False, type='str'),
+            pn_clipassword=dict(required=False, type='str', no_log=True),
+            pn_cliswitch=dict(required=False, type='str', default='local'),
+            # PEP8: no space before '=' in keyword arguments.
+            state=dict(required=True, type='str',
+                       choices=['present', 'absent', 'update']),
+            pn_name=dict(required=True, type='str'),
+            pn_port=dict(type='str'),
+            pn_peer_port=dict(type='str'),
+            pn_mode=dict(type='str', choices=[
+                'active-standby', 'active-active']),
+            pn_peer_switch=dict(type='str'),
+            pn_failover_action=dict(type='str', choices=['move', 'ignore']),
+            pn_lacp_mode=dict(type='str', choices=[
+                'off', 'passive', 'active']),
+            pn_lacp_timeout=dict(type='str', choices=['slow', 'fast']),
+            pn_lacp_fallback=dict(type='str', choices=[
+                'individual', 'bundled']),
+            pn_lacp_fallback_timeout=dict(type='str')
+        ),
+        required_if=(
+            ["state", "present", ["pn_name", "pn_port", "pn_peer_port",
+                                  "pn_peer_switch"]],
+            ["state", "absent", ["pn_name"]],
+            ["state", "update", ["pn_name"]]
+        )
+    )
+
+    # Argument accessing
+    state = module.params['state']
+    name = module.params['pn_name']
+    port = module.params['pn_port']
+    peer_port = module.params['pn_peer_port']
+    mode = module.params['pn_mode']
+    peer_switch = module.params['pn_peer_switch']
+    failover_action = module.params['pn_failover_action']
+    lacp_mode = module.params['pn_lacp_mode']
+    lacp_timeout = module.params['pn_lacp_timeout']
+    lacp_fallback = module.params['pn_lacp_fallback']
+    lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout']
+
+    command = get_command_from_state(state)
+
+    # Building the CLI command string
+    cli = pn_cli(module)
+
+    if command == 'vlag-delete':
+        # Deleting a vlag that does not exist is a no-op, not an error.
+        check_cli(module, cli)
+        if VLAG_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='VLAG with name %s does not exist' % name
+            )
+        cli += ' %s name %s ' % (command, name)
+
+    else:
+
+        if command == 'vlag-create':
+            # Creating a vlag that already exists is likewise a no-op.
+            check_cli(module, cli)
+            if VLAG_EXISTS is True:
+                module.exit_json(
+                    skipped=True,
+                    msg='VLAG with name %s already exists' % name
+                )
+        cli += ' %s name %s ' % (command, name)
+
+        # Guard port and peer-port separately: on 'update' only one of them
+        # may be set, and the combined format string would otherwise inject
+        # the literal string 'None' into the CLI.
+        if port:
+            cli += ' port %s ' % port
+
+        if peer_port:
+            cli += ' peer-port %s ' % peer_port
+
+        if mode:
+            cli += ' mode ' + mode
+
+        if peer_switch:
+            cli += ' peer-switch ' + peer_switch
+
+        if failover_action:
+            cli += ' failover-' + failover_action + '-L2 '
+
+        if lacp_mode:
+            cli += ' lacp-mode ' + lacp_mode
+
+        if lacp_timeout:
+            cli += ' lacp-timeout ' + lacp_timeout
+
+        if lacp_fallback:
+            cli += ' lacp-fallback ' + lacp_fallback
+
+        if lacp_fallback_timeout:
+            cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout
+
+    run_cli(module, cli)
+
+# AnsibleModule boilerplate
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/network/netvisor/pn_vlan.py b/network/netvisor/pn_vlan.py
new file mode 100644
index 00000000000..c79e4ba5e3e
--- /dev/null
+++ b/network/netvisor/pn_vlan.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+""" PN CLI vlan-create/vlan-delete """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import shlex
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: pn_vlan
+author: "Pluribus Networks (@amitsi)"
+version_added: "2.2"
+version: 1.0
+short_description: CLI command to create/delete a VLAN.
+description:
+ - Execute vlan-create or vlan-delete command.
+  - VLANs are used to isolate network traffic at Layer 2. The VLAN identifiers
+ 0 and 4095 are reserved and cannot be used per the IEEE 802.1Q standard.
+ The range of configurable VLAN identifiers is 2 through 4092.
+options:
+ pn_cliusername:
+ description:
+ - Provide login username if user is not root.
+ required: False
+ pn_clipassword:
+ description:
+ - Provide login password if user is not root.
+ required: False
+ pn_cliswitch:
+ description:
+ - Target switch(es) to run the cli on.
+ required: False
+ state:
+ description:
+ - State the action to perform. Use 'present' to create vlan and
+ 'absent' to delete vlan.
+ required: True
+ choices: ['present', 'absent']
+ pn_vlanid:
+ description:
+ - Specify a VLAN identifier for the VLAN. This is a value between
+ 2 and 4092.
+ required: True
+ pn_scope:
+ description:
+ - Specify a scope for the VLAN.
+ - Required for vlan-create.
+ choices: ['fabric', 'local']
+ pn_description:
+ description:
+ - Specify a description for the VLAN.
+ pn_stats:
+ description:
+ - Specify if you want to collect statistics for a VLAN. Statistic
+ collection is enabled by default.
+ pn_ports:
+ description:
+ - Specifies the switch network data port number, list of ports, or range
+      of ports. Port numbers must be in the range of 1 to 64.
+ pn_untagged_ports:
+ description:
+ - Specifies the ports that should have untagged packets mapped to the
+ VLAN. Untagged packets are packets that do not contain IEEE 802.1Q VLAN
+ tags.
+"""
+
+EXAMPLES = """
+- name: create a VLAN
+ pn_vlan:
+ state: 'present'
+ pn_vlanid: 1854
+ pn_scope: fabric
+
+- name: delete VLANs
+ pn_vlan:
+ state: 'absent'
+ pn_vlanid: 1854
+"""
+
+RETURN = """
+command:
+ description: The CLI command run on the target node(s).
+stdout:
+ description: The set of responses from the vlan command.
+ returned: always
+ type: list
+stderr:
+ description: The set of error responses from the vlan command.
+ returned: on error
+ type: list
+changed:
+ description: Indicates whether the CLI caused changes on the target.
+ returned: always
+ type: bool
+"""
+
+VLAN_EXISTS = None
+MAX_VLAN_ID = 4092
+MIN_VLAN_ID = 2
+
+
+def pn_cli(module):
+    """
+    Build the base Netvisor CLI invocation string.
+    Credentials are embedded only when both username and password are
+    supplied; otherwise the root CLI is invoked as-is.
+    :param module: The Ansible module holding pn_cliusername,
+                   pn_clipassword and pn_cliswitch parameters.
+    :return: The cli prefix string to which a command can be appended.
+    """
+    user = module.params['pn_cliusername']
+    pwd = module.params['pn_clipassword']
+    switch = module.params['pn_cliswitch']
+
+    if user and pwd:
+        cli = '/usr/bin/cli --quiet --user %s:%s ' % (user, pwd)
+    else:
+        cli = '/usr/bin/cli --quiet '
+
+    # 'local' targets the switch we are logged into; any other value is
+    # treated as a named switch to direct the command to.
+    cli += ' switch-local ' if switch == 'local' else ' switch ' + switch
+    return cli
+
+
+def check_cli(module, cli):
+    """
+    This method checks for idempotency using the vlan-show command.
+    If a vlan with given vlan id exists, return VLAN_EXISTS as True else False.
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    :return Global Booleans: VLAN_EXISTS (set as a module-level side effect)
+    """
+    vlanid = module.params['pn_vlanid']
+
+    show = cli + \
+        ' vlan-show id %s format id,scope no-show-headers' % str(vlanid)
+    show = shlex.split(show)
+    # run_command returns (rc, stdout, stderr); only stdout is needed here.
+    out = module.run_command(show)[1]
+
+    # Whitespace-split so the membership test below matches the vlan id as
+    # a whole token rather than a substring.
+    out = out.split()
+    # Global flags
+    global VLAN_EXISTS
+    if str(vlanid) in out:
+        VLAN_EXISTS = True
+    else:
+        VLAN_EXISTS = False
+
+
+def run_cli(module, cli):
+    """
+    This method executes the cli command on the target node(s) and returns the
+    output. The module then exits based on the output (exit_json is called on
+    every path, so this function does not return to the caller).
+    :param cli: the complete cli string to be executed on the target node(s).
+    :param module: The Ansible module to fetch command
+    """
+    cliswitch = module.params['pn_cliswitch']
+    # PEP8: space around '=' (was 'state= module.params[...]'); also matches
+    # the sibling netvisor modules.
+    state = module.params['state']
+    command = get_command_from_state(state)
+
+    cmd = shlex.split(cli)
+    # 'out' contains the output
+    # 'err' contains the error messages
+    result, out, err = module.run_command(cmd)
+
+    # Report only the text after the switch name so credentials embedded in
+    # the cli prefix are not echoed back to the user.
+    # NOTE(review): assumes cliswitch occurs exactly once in the cli string.
+    print_cli = cli.split(cliswitch)[1]
+
+    # Response in JSON format
+    if result != 0:
+        # Non-zero exit code from the CLI: report failure, nothing changed.
+        module.exit_json(
+            command=print_cli,
+            stderr=err.strip(),
+            msg="%s operation failed" % command,
+            changed=False
+        )
+
+    if out:
+        module.exit_json(
+            command=print_cli,
+            stdout=out.strip(),
+            msg="%s operation completed" % command,
+            changed=True
+        )
+
+    else:
+        # Success with no output still counts as a change.
+        module.exit_json(
+            command=print_cli,
+            msg="%s operation completed" % command,
+            changed=True
+        )
+
+
+def get_command_from_state(state):
+    """
+    Map an Ansible state to the matching Netvisor vlan command.
+    :param state: One of 'present' or 'absent'.
+    :return: The corresponding CLI command name, or None if unrecognised.
+    """
+    return {
+        'present': 'vlan-create',
+        'absent': 'vlan-delete',
+    }.get(state)
+
+
+def main():
+    """ Parse arguments and create or delete a VLAN accordingly. """
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliusername=dict(required=False, type='str'),
+            pn_clipassword=dict(required=False, type='str', no_log=True),
+            pn_cliswitch=dict(required=False, type='str', default='local'),
+            # PEP8: no space before '=' in keyword arguments.
+            state=dict(required=True, type='str',
+                       choices=['present', 'absent']),
+            pn_vlanid=dict(required=True, type='int'),
+            pn_scope=dict(type='str', choices=['fabric', 'local']),
+            pn_description=dict(type='str'),
+            pn_stats=dict(type='bool'),
+            pn_ports=dict(type='str'),
+            pn_untagged_ports=dict(type='str')
+        ),
+        required_if=(
+            ["state", "present", ["pn_vlanid", "pn_scope"]],
+            ["state", "absent", ["pn_vlanid"]]
+        )
+    )
+
+    # Accessing the arguments
+    state = module.params['state']
+    vlanid = module.params['pn_vlanid']
+    scope = module.params['pn_scope']
+    description = module.params['pn_description']
+    stats = module.params['pn_stats']
+    ports = module.params['pn_ports']
+    untagged_ports = module.params['pn_untagged_ports']
+
+    command = get_command_from_state(state)
+
+    # Building the CLI command string
+    cli = pn_cli(module)
+
+    # Configurable VLAN ids are 2 through 4092 (see DOCUMENTATION above).
+    if not MIN_VLAN_ID <= vlanid <= MAX_VLAN_ID:
+        module.exit_json(
+            msg="VLAN id must be between 2 and 4092",
+            changed=False
+        )
+
+    if command == 'vlan-create':
+        # Creating a vlan that already exists is a no-op, not an error.
+        check_cli(module, cli)
+        if VLAN_EXISTS is True:
+            module.exit_json(
+                skipped=True,
+                msg='VLAN with id %s already exists' % str(vlanid)
+            )
+
+        cli += ' %s id %s scope %s ' % (command, str(vlanid), scope)
+
+        if description:
+            cli += ' description ' + description
+
+        # Three-valued flag: True/False append an option, None appends nothing.
+        if stats is True:
+            cli += ' stats '
+        if stats is False:
+            cli += ' no-stats '
+
+        if ports:
+            cli += ' ports ' + ports
+
+        if untagged_ports:
+            cli += ' untagged-ports ' + untagged_ports
+
+    if command == 'vlan-delete':
+        # Deleting a vlan that does not exist is likewise a no-op.
+        check_cli(module, cli)
+        if VLAN_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='VLAN with id %s does not exist' % str(vlanid)
+            )
+
+        cli += ' %s id %s ' % (command, str(vlanid))
+
+    run_cli(module, cli)
+
+# AnsibleModule boilerplate
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/network/netvisor/pn_vrouter.py b/network/netvisor/pn_vrouter.py
new file mode 100644
index 00000000000..bb0cadc76d2
--- /dev/null
+++ b/network/netvisor/pn_vrouter.py
@@ -0,0 +1,416 @@
+#!/usr/bin/python
+""" PN CLI vrouter-create/vrouter-delete/vrouter-modify """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import shlex
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: pn_vrouter
+author: "Pluribus Networks (@amitsi)"
+version_added: "2.2"
+version: 1.0
+short_description: CLI command to create/delete/modify a vrouter.
+description:
+ - Execute vrouter-create, vrouter-delete, vrouter-modify command.
+ - Each fabric, cluster, standalone switch, or virtual network (VNET) can
+ provide its tenants with a virtual router (vRouter) service that forwards
+ traffic between networks and implements Layer 3 protocols.
+ - C(vrouter-create) creates a new vRouter service.
+ - C(vrouter-delete) deletes a vRouter service.
+ - C(vrouter-modify) modifies a vRouter service.
+options:
+ pn_cliusername:
+ description:
+ - Provide login username if user is not root.
+ required: False
+ pn_clipassword:
+ description:
+ - Provide login password if user is not root.
+ required: False
+ pn_cliswitch:
+ description:
+ - Target switch(es) to run the CLI on.
+ required: False
+ state:
+ description:
+ - State the action to perform. Use 'present' to create vrouter,
+ 'absent' to delete vrouter and 'update' to modify vrouter.
+ required: True
+ choices: ['present', 'absent', 'update']
+ pn_name:
+ description:
+ - Specify the name of the vRouter.
+ required: true
+ pn_vnet:
+ description:
+ - Specify the name of the VNET.
+ - Required for vrouter-create.
+ pn_service_type:
+ description:
+ - Specify if the vRouter is a dedicated or shared VNET service.
+ choices: ['dedicated', 'shared']
+ pn_service_state:
+ description:
+ - Specify to enable or disable vRouter service.
+ choices: ['enable', 'disable']
+ pn_router_type:
+ description:
+ - Specify if the vRouter uses software or hardware.
+ - Note that if you specify hardware as router type, you cannot assign IP
+ addresses using DHCP. You must specify a static IP address.
+ choices: ['hardware', 'software']
+ pn_hw_vrrp_id:
+ description:
+ - Specifies the VRRP ID for a hardware vrouter.
+ pn_router_id:
+ description:
+ - Specify the vRouter IP address.
+ pn_bgp_as:
+ description:
+ - Specify the Autonomous System Number(ASN) if the vRouter runs Border
+ Gateway Protocol(BGP).
+ pn_bgp_redistribute:
+ description:
+ - Specify how BGP routes are redistributed.
+ choices: ['static', 'connected', 'rip', 'ospf']
+ pn_bgp_max_paths:
+ description:
+ - Specify the maximum number of paths for BGP. This is a number between
+ 1 and 255 or 0 to unset.
+ pn_bgp_options:
+ description:
+ - Specify other BGP options as a whitespaces separated string within
+ single quotes ''.
+ pn_rip_redistribute:
+ description:
+ - Specify how RIP routes are redistributed.
+ choices: ['static', 'connected', 'ospf', 'bgp']
+ pn_ospf_redistribute:
+ description:
+ - Specify how OSPF routes are redistributed.
+ choices: ['static', 'connected', 'bgp', 'rip']
+ pn_ospf_options:
+    description:
+      - Specify other OSPF options as a whitespaces separated string within
+        single quotes ''.
+  pn_vrrp_track_port:
+    description:
+      - Specify the port or list of ports to track for VRRP.
+"""
+
+EXAMPLES = """
+- name: create vrouter
+ pn_vrouter:
+ state: 'present'
+ pn_name: 'ansible-vrouter'
+ pn_vnet: 'ansible-fab-global'
+ pn_router_id: 208.74.182.1
+
+- name: delete vrouter
+ pn_vrouter:
+ state: 'absent'
+ pn_name: 'ansible-vrouter'
+"""
+
+RETURN = """
+command:
+ description: The CLI command run on the target node(s).
+stdout:
+ description: The set of responses from the vrouter command.
+ returned: always
+ type: list
+stderr:
+ description: The set of error responses from the vrouter command.
+ returned: on error
+ type: list
+changed:
+ description: Indicates whether the CLI caused changes on the target.
+ returned: always
+ type: bool
+"""
+
+VROUTER_EXISTS = None
+VROUTER_NAME_EXISTS = None
+
+
+def pn_cli(module):
+    """
+    Build the base Netvisor CLI invocation string.
+    Credentials are embedded only when both username and password are
+    supplied; otherwise the root CLI is invoked as-is.
+    :param module: The Ansible module holding pn_cliusername,
+                   pn_clipassword and pn_cliswitch parameters.
+    :return: The cli prefix string to which a command can be appended.
+    """
+    user = module.params['pn_cliusername']
+    pwd = module.params['pn_clipassword']
+    switch = module.params['pn_cliswitch']
+
+    if user and pwd:
+        cli = '/usr/bin/cli --quiet --user %s:%s ' % (user, pwd)
+    else:
+        cli = '/usr/bin/cli --quiet '
+
+    # 'local' targets the switch we are logged into; any other value is
+    # treated as a named switch to direct the command to.
+    cli += ' switch-local ' if switch == 'local' else ' switch ' + switch
+    return cli
+
+
+def check_cli(module, cli):
+    """
+    This method checks for idempotency using the vlan-show command.
+    A switch can have only one vRouter configuration.
+    If a vRouter already exists on the given switch, return VROUTER_EXISTS as
+    True else False.
+    If a vRouter with the given name exists(on a different switch), return
+    VROUTER_NAME_EXISTS as True else False.
+
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    :return Global Booleans: VROUTER_EXISTS, VROUTER_NAME_EXISTS
+    """
+    name = module.params['pn_name']
+    # Global flags
+    global VROUTER_EXISTS, VROUTER_NAME_EXISTS
+
+    # Get the name of the local switch
+    location = cli + ' switch-setup-show format switch-name'
+    location = shlex.split(location)
+    # run_command returns (rc, stdout, stderr); only stdout is needed here.
+    out = module.run_command(location)[1]
+    # NOTE(review): assumes the output is '<label> <switch-name>' so the
+    # second whitespace token is the switch name -- confirm on device.
+    location = out.split()[1]
+
+    # Check for any vRouters on the switch
+    check_vrouter = cli + ' vrouter-show location %s ' % location
+    check_vrouter += 'format name no-show-headers'
+    check_vrouter = shlex.split(check_vrouter)
+    out = module.run_command(check_vrouter)[1]
+
+    # Any output at all means a vRouter is already configured here.
+    if out:
+        VROUTER_EXISTS = True
+    else:
+        VROUTER_EXISTS = False
+
+    # Check for any vRouters with the given name
+    show = cli + ' vrouter-show format name no-show-headers '
+    show = shlex.split(show)
+    out = module.run_command(show)[1]
+    # Whitespace-split so the membership test matches whole names.
+    out = out.split()
+
+    if name in out:
+        VROUTER_NAME_EXISTS = True
+    else:
+        VROUTER_NAME_EXISTS = False
+
+
+def run_cli(module, cli):
+    """
+    This method executes the cli command on the target node(s) and returns the
+    output. The module then exits based on the output (exit_json is called on
+    every path, so this function does not return to the caller).
+    :param cli: the complete cli string to be executed on the target node(s).
+    :param module: The Ansible module to fetch command
+    """
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    command = get_command_from_state(state)
+
+    cmd = shlex.split(cli)
+
+    # 'out' contains the output
+    # 'err' contains the error messages
+    result, out, err = module.run_command(cmd)
+
+    # Report only the text after the switch name so credentials embedded in
+    # the cli prefix are not echoed back to the user.
+    # NOTE(review): assumes cliswitch occurs exactly once in the cli string;
+    # a username/password containing the switch name would break this.
+    print_cli = cli.split(cliswitch)[1]
+
+    # Response in JSON format
+    if result != 0:
+        # Non-zero exit code from the CLI: report failure, nothing changed.
+        module.exit_json(
+            command=print_cli,
+            stderr=err.strip(),
+            msg="%s operation failed" % command,
+            changed=False
+        )
+
+    if out:
+        module.exit_json(
+            command=print_cli,
+            stdout=out.strip(),
+            msg="%s operation completed" % command,
+            changed=True
+        )
+
+    else:
+        # Success with no output still counts as a change.
+        module.exit_json(
+            command=print_cli,
+            msg="%s operation completed" % command,
+            changed=True
+        )
+
+
+def get_command_from_state(state):
+    """
+    Map an Ansible state to the matching Netvisor vrouter command.
+    :param state: One of 'present', 'absent' or 'update'.
+    :return: The corresponding CLI command name, or None if unrecognised.
+    """
+    return {
+        'present': 'vrouter-create',
+        'absent': 'vrouter-delete',
+        'update': 'vrouter-modify',
+    }.get(state)
+
+
+def main():
+    """ Parse arguments and create/delete/modify a vRouter accordingly. """
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliusername=dict(required=False, type='str'),
+            pn_clipassword=dict(required=False, type='str', no_log=True),
+            pn_cliswitch=dict(required=False, type='str', default='local'),
+            # PEP8: no space before '=' in keyword arguments.
+            state=dict(required=True, type='str',
+                       choices=['present', 'absent', 'update']),
+            pn_name=dict(required=True, type='str'),
+            pn_vnet=dict(type='str'),
+            pn_service_type=dict(type='str', choices=['dedicated', 'shared']),
+            pn_service_state=dict(type='str', choices=['enable', 'disable']),
+            pn_router_type=dict(type='str', choices=['hardware', 'software']),
+            pn_hw_vrrp_id=dict(type='int'),
+            pn_router_id=dict(type='str'),
+            pn_bgp_as=dict(type='int'),
+            pn_bgp_redistribute=dict(type='str',
+                                     choices=['static', 'connected',
+                                              'rip', 'ospf']),
+            pn_bgp_max_paths=dict(type='int'),
+            pn_bgp_options=dict(type='str'),
+            pn_rip_redistribute=dict(type='str',
+                                     choices=['static', 'connected',
+                                              'bgp', 'ospf']),
+            pn_ospf_redistribute=dict(type='str',
+                                      choices=['static', 'connected',
+                                               'bgp', 'rip']),
+            pn_ospf_options=dict(type='str'),
+            pn_vrrp_track_port=dict(type='str')
+        ),
+        required_if=(
+            ["state", "present", ["pn_name", "pn_vnet"]],
+            ["state", "absent", ["pn_name"]],
+            ["state", "update", ["pn_name"]]
+        )
+    )
+
+    # Accessing the arguments
+    state = module.params['state']
+    name = module.params['pn_name']
+    vnet = module.params['pn_vnet']
+    service_type = module.params['pn_service_type']
+    service_state = module.params['pn_service_state']
+    router_type = module.params['pn_router_type']
+    hw_vrrp_id = module.params['pn_hw_vrrp_id']
+    router_id = module.params['pn_router_id']
+    bgp_as = module.params['pn_bgp_as']
+    bgp_redistribute = module.params['pn_bgp_redistribute']
+    bgp_max_paths = module.params['pn_bgp_max_paths']
+    bgp_options = module.params['pn_bgp_options']
+    rip_redistribute = module.params['pn_rip_redistribute']
+    ospf_redistribute = module.params['pn_ospf_redistribute']
+    ospf_options = module.params['pn_ospf_options']
+    vrrp_track_port = module.params['pn_vrrp_track_port']
+
+    command = get_command_from_state(state)
+
+    # Building the CLI command string
+    cli = pn_cli(module)
+
+    if command == 'vrouter-delete':
+        # Deleting a vRouter that does not exist is a no-op, not an error.
+        check_cli(module, cli)
+        if VROUTER_NAME_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='vRouter with name %s does not exist' % name
+            )
+        cli += ' %s name %s ' % (command, name)
+
+    else:
+
+        if command == 'vrouter-create':
+            check_cli(module, cli)
+            # A switch can have only one vRouter configuration.
+            if VROUTER_EXISTS is True:
+                module.exit_json(
+                    skipped=True,
+                    msg='Maximum number of vRouters has been reached on this '
+                        'switch'
+                )
+            if VROUTER_NAME_EXISTS is True:
+                module.exit_json(
+                    skipped=True,
+                    msg='vRouter with name %s already exists' % name
+                )
+        cli += ' %s name %s ' % (command, name)
+
+        if vnet:
+            cli += ' vnet ' + vnet
+
+        if service_type:
+            cli += ' %s-vnet-service ' % service_type
+
+        if service_state:
+            cli += ' ' + service_state
+
+        if router_type:
+            cli += ' router-type ' + router_type
+
+        if hw_vrrp_id:
+            cli += ' hw-vrrp-id ' + str(hw_vrrp_id)
+
+        if router_id:
+            cli += ' router-id ' + router_id
+
+        if bgp_as:
+            cli += ' bgp-as ' + str(bgp_as)
+
+        if bgp_redistribute:
+            cli += ' bgp-redistribute ' + bgp_redistribute
+
+        if bgp_max_paths:
+            cli += ' bgp-max-paths ' + str(bgp_max_paths)
+
+        if bgp_options:
+            cli += ' %s ' % bgp_options
+
+        if rip_redistribute:
+            cli += ' rip-redistribute ' + rip_redistribute
+
+        if ospf_redistribute:
+            cli += ' ospf-redistribute ' + ospf_redistribute
+
+        if ospf_options:
+            cli += ' %s ' % ospf_options
+
+        if vrrp_track_port:
+            cli += ' vrrp-track-port ' + vrrp_track_port
+
+    run_cli(module, cli)
+
+# AnsibleModule boilerplate
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/network/netvisor/pn_vrouterbgp.py b/network/netvisor/pn_vrouterbgp.py
new file mode 100644
index 00000000000..e53403da070
--- /dev/null
+++ b/network/netvisor/pn_vrouterbgp.py
@@ -0,0 +1,474 @@
+#!/usr/bin/python
+""" PN-CLI vrouter-bgp-add/vrouter-bgp-remove/vrouter-bgp-modify """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import shlex
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: pn_vrouterbgp
+author: "Pluribus Networks (@amitsi)"
+version_added: "2.2"
+version: 1.0
+short_description: CLI command to add/remove/modify vrouter-bgp.
+description:
+ - Execute vrouter-bgp-add, vrouter-bgp-remove, vrouter-bgp-modify command.
+ - Each fabric, cluster, standalone switch, or virtual network (VNET) can
+ provide its tenants with a vRouter service that forwards traffic between
+ networks and implements Layer 4 protocols.
+options:
+ pn_cliusername:
+ description:
+ - Provide login username if user is not root.
+ required: False
+ pn_clipassword:
+ description:
+ - Provide login password if user is not root.
+ required: False
+ pn_cliswitch:
+ description:
+ - Target switch(es) to run the cli on.
+ required: False
+ state:
+ description:
+ - State the action to perform. Use 'present' to add bgp,
+ 'absent' to remove bgp and 'update' to modify bgp.
+ required: True
+ choices: ['present', 'absent', 'update']
+ pn_vrouter_name:
+ description:
+ - Specify a name for the vRouter service.
+ required: True
+ pn_neighbor:
+ description:
+ - Specify a neighbor IP address to use for BGP.
+ - Required for vrouter-bgp-add.
+ pn_remote_as:
+ description:
+ - Specify the remote Autonomous System(AS) number. This value is between
+ 1 and 4294967295.
+ - Required for vrouter-bgp-add.
+ pn_next_hop_self:
+ description:
+ - Specify if the next-hop is the same router or not.
+ pn_password:
+ description:
+ - Specify a password, if desired.
+ pn_ebgp:
+ description:
+ - Specify a value for external BGP to accept or attempt BGP connections
+ to external peers, not directly connected, on the network. This is a
+ value between 1 and 255.
+ pn_prefix_listin:
+ description:
+ - Specify the prefix list to filter traffic inbound.
+ pn_prefix_listout:
+ description:
+ - Specify the prefix list to filter traffic outbound.
+ pn_route_reflector:
+ description:
+ - Specify if a route reflector client is used.
+ pn_override_capability:
+ description:
+ - Specify if you want to override capability.
+ pn_soft_reconfig:
+ description:
+ - Specify if you want a soft reconfiguration of inbound traffic.
+ pn_max_prefix:
+ description:
+ - Specify the maximum number of prefixes.
+ pn_max_prefix_warn:
+ description:
+ - Specify if you want a warning message when the maximum number of
+ prefixes is exceeded.
+ pn_bfd:
+ description:
+ - Specify if you want BFD protocol support for fault detection.
+ pn_multiprotocol:
+ description:
+ - Specify a multi-protocol for BGP.
+ choices: ['ipv4-unicast', 'ipv6-unicast']
+ pn_weight:
+ description:
+ - Specify a default weight value between 0 and 65535 for the neighbor
+ routes.
+ pn_default_originate:
+ description:
+ - Specify if you want announce default routes to the neighbor or not.
+ pn_keepalive:
+ description:
+ - Specify BGP neighbor keepalive interval in seconds.
+ pn_holdtime:
+ description:
+ - Specify BGP neighbor holdtime in seconds.
+ pn_route_mapin:
+ description:
+ - Specify inbound route map for neighbor.
+ pn_route_mapout:
+ description:
+ - Specify outbound route map for neighbor.
+"""
+
+EXAMPLES = """
+- name: add vrouter-bgp
+ pn_vrouterbgp:
+ state: 'present'
+ pn_vrouter_name: 'ansible-vrouter'
+ pn_neighbor: 104.104.104.1
+ pn_remote_as: 1800
+
+- name: remove vrouter-bgp
+ pn_vrouterbgp:
+ state: 'absent'
+    pn_vrouter_name: 'ansible-vrouter'
+"""
+
+RETURN = """
+command:
+ description: The CLI command run on the target node(s).
+stdout:
+  description: The set of responses from the vrouterbgp command.
+ returned: always
+ type: list
+stderr:
+ description: The set of error responses from the vrouterbgp command.
+ returned: on error
+ type: list
+changed:
+ description: Indicates whether the CLI caused changes on the target.
+ returned: always
+ type: bool
+"""
+
+
+VROUTER_EXISTS = None
+NEIGHBOR_EXISTS = None
+
+
+def pn_cli(module):
+ """
+ This method is to generate the cli portion to launch the Netvisor cli.
+ It parses the username, password, switch parameters from module.
+ :param module: The Ansible module to fetch username, password and switch
+ :return: returns the cli string for further processing
+ """
+ username = module.params['pn_cliusername']
+ password = module.params['pn_clipassword']
+ cliswitch = module.params['pn_cliswitch']
+
+ if username and password:
+ cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
+ else:
+ cli = '/usr/bin/cli --quiet '
+
+ if cliswitch == 'local':
+ cli += ' switch-local '
+ else:
+ cli += ' switch ' + cliswitch
+ return cli
+
+
+def check_cli(module, cli):
+ """
+ This method checks if vRouter exists on the target node.
+ This method also checks for idempotency using the vrouter-bgp-show command.
+ If the given vRouter exists, return VROUTER_EXISTS as True else False.
+ If a BGP neighbor with the given ip exists on the given vRouter,
+ return NEIGHBOR_EXISTS as True else False.
+
+ :param module: The Ansible module to fetch input parameters
+ :param cli: The CLI string
+ :return Global Booleans: VROUTER_EXISTS, NEIGHBOR_EXISTS
+ """
+ vrouter_name = module.params['pn_vrouter_name']
+ neighbor = module.params['pn_neighbor']
+ # Global flags
+ global VROUTER_EXISTS, NEIGHBOR_EXISTS
+
+ # Check for vRouter
+ check_vrouter = cli + ' vrouter-show format name no-show-headers '
+ check_vrouter = shlex.split(check_vrouter)
+ out = module.run_command(check_vrouter)[1]
+ out = out.split()
+
+ if vrouter_name in out:
+ VROUTER_EXISTS = True
+ else:
+ VROUTER_EXISTS = False
+
+ # Check for BGP neighbors
+ show = cli + ' vrouter-bgp-show vrouter-name %s ' % vrouter_name
+ show += 'format neighbor no-show-headers'
+ show = shlex.split(show)
+ out = module.run_command(show)[1]
+ out = out.split()
+
+ if neighbor in out:
+ NEIGHBOR_EXISTS = True
+ else:
+ NEIGHBOR_EXISTS = False
+
+
+def run_cli(module, cli):
+ """
+ This method executes the cli command on the target node(s) and returns the
+ output. The module then exits based on the output.
+ :param cli: the complete cli string to be executed on the target node(s).
+ :param module: The Ansible module to fetch command
+ """
+ cliswitch = module.params['pn_cliswitch']
+ state = module.params['state']
+ command = get_command_from_state(state)
+
+ cmd = shlex.split(cli)
+
+ # 'out' contains the output
+ # 'err' contains the error messages
+ result, out, err = module.run_command(cmd)
+
+ print_cli = cli.split(cliswitch)[1]
+
+ # Response in JSON format
+ if result != 0:
+ module.exit_json(
+ command=print_cli,
+ stderr=err.strip(),
+ msg="%s operation failed" % command,
+ changed=False
+ )
+
+ if out:
+ module.exit_json(
+ command=print_cli,
+ stdout=out.strip(),
+ msg="%s operation completed" % command,
+ changed=True
+ )
+
+ else:
+ module.exit_json(
+ command=print_cli,
+ msg="%s operation completed" % command,
+ changed=True
+ )
+
+
+def get_command_from_state(state):
+ """
+ This method gets appropriate command name for the state specified. It
+ returns the command name for the specified state.
+ :param state: The state for which the respective command name is required.
+ """
+ command = None
+ if state == 'present':
+ command = 'vrouter-bgp-add'
+ if state == 'absent':
+ command = 'vrouter-bgp-remove'
+ if state == 'update':
+ command = 'vrouter-bgp-modify'
+ return command
+
+
+def main():
+ """ This portion is for arguments parsing """
+ module = AnsibleModule(
+ argument_spec=dict(
+ pn_cliusername=dict(required=False, type='str'),
+ pn_clipassword=dict(required=False, type='str', no_log=True),
+ pn_cliswitch=dict(required=False, type='str', default='local'),
+ state=dict(required=True, type='str',
+ choices=['present', 'absent', 'update']),
+ pn_vrouter_name=dict(required=True, type='str'),
+ pn_neighbor=dict(type='str'),
+ pn_remote_as=dict(type='str'),
+ pn_next_hop_self=dict(type='bool'),
+ pn_password=dict(type='str', no_log=True),
+ pn_ebgp=dict(type='int'),
+ pn_prefix_listin=dict(type='str'),
+ pn_prefix_listout=dict(type='str'),
+ pn_route_reflector=dict(type='bool'),
+ pn_override_capability=dict(type='bool'),
+ pn_soft_reconfig=dict(type='bool'),
+ pn_max_prefix=dict(type='int'),
+ pn_max_prefix_warn=dict(type='bool'),
+ pn_bfd=dict(type='bool'),
+ pn_multiprotocol=dict(type='str',
+ choices=['ipv4-unicast', 'ipv6-unicast']),
+ pn_weight=dict(type='int'),
+ pn_default_originate=dict(type='bool'),
+ pn_keepalive=dict(type='str'),
+ pn_holdtime=dict(type='str'),
+ pn_route_mapin=dict(type='str'),
+ pn_route_mapout=dict(type='str')
+ ),
+ required_if=(
+ ["state", "present",
+ ["pn_vrouter_name", "pn_neighbor", "pn_remote_as"]],
+ ["state", "absent",
+ ["pn_vrouter_name", "pn_neighbor"]],
+ ["state", "update",
+ ["pn_vrouter_name", "pn_neighbor"]]
+ )
+ )
+
+ # Accessing the arguments
+    state = module.params['state']
+ vrouter_name = module.params['pn_vrouter_name']
+ neighbor = module.params['pn_neighbor']
+ remote_as = module.params['pn_remote_as']
+ next_hop_self = module.params['pn_next_hop_self']
+ password = module.params['pn_password']
+ ebgp = module.params['pn_ebgp']
+ prefix_listin = module.params['pn_prefix_listin']
+ prefix_listout = module.params['pn_prefix_listout']
+ route_reflector = module.params['pn_route_reflector']
+ override_capability = module.params['pn_override_capability']
+ soft_reconfig = module.params['pn_soft_reconfig']
+ max_prefix = module.params['pn_max_prefix']
+ max_prefix_warn = module.params['pn_max_prefix_warn']
+ bfd = module.params['pn_bfd']
+ multiprotocol = module.params['pn_multiprotocol']
+ weight = module.params['pn_weight']
+ default_originate = module.params['pn_default_originate']
+ keepalive = module.params['pn_keepalive']
+ holdtime = module.params['pn_holdtime']
+ route_mapin = module.params['pn_route_mapin']
+ route_mapout = module.params['pn_route_mapout']
+
+ # Building the CLI command string
+ cli = pn_cli(module)
+
+ command = get_command_from_state(state)
+ if command == 'vrouter-bgp-remove':
+ check_cli(module, cli)
+ if VROUTER_EXISTS is False:
+ module.exit_json(
+ skipped=True,
+ msg='vRouter %s does not exist' % vrouter_name
+ )
+ if NEIGHBOR_EXISTS is False:
+ module.exit_json(
+ skipped=True,
+ msg=('BGP neighbor with IP %s does not exist on %s'
+ % (neighbor, vrouter_name))
+ )
+ cli += (' %s vrouter-name %s neighbor %s '
+ % (command, vrouter_name, neighbor))
+
+ else:
+
+ if command == 'vrouter-bgp-add':
+ check_cli(module, cli)
+ if VROUTER_EXISTS is False:
+ module.exit_json(
+ skipped=True,
+ msg='vRouter %s does not exist' % vrouter_name
+ )
+ if NEIGHBOR_EXISTS is True:
+ module.exit_json(
+ skipped=True,
+ msg=('BGP neighbor with IP %s already exists on %s'
+ % (neighbor, vrouter_name))
+ )
+
+ cli += (' %s vrouter-name %s neighbor %s '
+ % (command, vrouter_name, neighbor))
+
+ if remote_as:
+ cli += ' remote-as ' + str(remote_as)
+
+ if next_hop_self is True:
+ cli += ' next-hop-self '
+ if next_hop_self is False:
+ cli += ' no-next-hop-self '
+
+ if password:
+ cli += ' password ' + password
+
+ if ebgp:
+ cli += ' ebgp-multihop ' + str(ebgp)
+
+ if prefix_listin:
+ cli += ' prefix-list-in ' + prefix_listin
+
+ if prefix_listout:
+ cli += ' prefix-list-out ' + prefix_listout
+
+ if route_reflector is True:
+ cli += ' route-reflector-client '
+ if route_reflector is False:
+ cli += ' no-route-reflector-client '
+
+ if override_capability is True:
+ cli += ' override-capability '
+ if override_capability is False:
+ cli += ' no-override-capability '
+
+ if soft_reconfig is True:
+ cli += ' soft-reconfig-inbound '
+ if soft_reconfig is False:
+ cli += ' no-soft-reconfig-inbound '
+
+ if max_prefix:
+ cli += ' max-prefix ' + str(max_prefix)
+
+ if max_prefix_warn is True:
+ cli += ' max-prefix-warn-only '
+ if max_prefix_warn is False:
+ cli += ' no-max-prefix-warn-only '
+
+ if bfd is True:
+ cli += ' bfd '
+ if bfd is False:
+ cli += ' no-bfd '
+
+ if multiprotocol:
+ cli += ' multi-protocol ' + multiprotocol
+
+ if weight:
+ cli += ' weight ' + str(weight)
+
+ if default_originate is True:
+ cli += ' default-originate '
+ if default_originate is False:
+ cli += ' no-default-originate '
+
+ if keepalive:
+ cli += ' neighbor-keepalive-interval ' + keepalive
+
+ if holdtime:
+ cli += ' neighbor-holdtime ' + holdtime
+
+ if route_mapin:
+ cli += ' route-map-in ' + route_mapin
+
+ if route_mapout:
+ cli += ' route-map-out ' + route_mapout
+
+ run_cli(module, cli)
+# Ansible boiler-plate
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/network/netvisor/pn_vrouterif.py b/network/netvisor/pn_vrouterif.py
new file mode 100644
index 00000000000..4c5df6b7fd4
--- /dev/null
+++ b/network/netvisor/pn_vrouterif.py
@@ -0,0 +1,483 @@
+#!/usr/bin/python
+""" PN-CLI vrouter-interface-add/remove/modify """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import shlex
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: pn_vrouterif
+author: "Pluribus Networks (@amitsi)"
+version_added: "2.2"
+version: 1.0
+short_description: CLI command to add/remove/modify vrouter-interface.
+description:
+ - Execute vrouter-interface-add, vrouter-interface-remove,
+ vrouter-interface-modify command.
+ - You configure interfaces to vRouter services on a fabric, cluster,
+ standalone switch or virtual network(VNET).
+options:
+ pn_cliusername:
+ description:
+ - Provide login username if user is not root.
+ required: False
+ pn_clipassword:
+ description:
+ - Provide login password if user is not root.
+ required: False
+ pn_cliswitch:
+ description:
+ - Target switch to run the cli on.
+ required: False
+ state:
+ description:
+ - State the action to perform. Use 'present' to add vrouter interface,
+ 'absent' to remove vrouter interface and 'update' to modify vrouter
+ interface.
+ required: True
+ choices: ['present', 'absent', 'update']
+ pn_vrouter_name:
+ description:
+ - Specify the name of the vRouter interface.
+ required: True
+ pn_vlan:
+ description:
+ - Specify the VLAN identifier. This is a value between 1 and 4092.
+ pn_interface_ip:
+ description:
+ - Specify the IP address of the interface in x.x.x.x/n format.
+ pn_assignment:
+ description:
+ - Specify the DHCP method for IP address assignment.
+ choices: ['none', 'dhcp', 'dhcpv6', 'autov6']
+ pn_vxlan:
+ description:
+ - Specify the VXLAN identifier. This is a value between 1 and 16777215.
+ pn_interface:
+ description:
+ - Specify if the interface is management, data or span interface.
+ choices: ['mgmt', 'data', 'span']
+ pn_alias:
+ description:
+ - Specify an alias for the interface.
+ pn_exclusive:
+ description:
+ - Specify if the interface is exclusive to the configuration. Exclusive
+ means that other configurations cannot use the interface. Exclusive is
+ specified when you configure the interface as span interface and allows
+ higher throughput through the interface.
+ pn_nic_enable:
+ description:
+ - Specify if the NIC is enabled or not
+ pn_vrrp_id:
+ description:
+ - Specify the ID for the VRRP interface. The IDs on both vRouters must be
+ the same IS number.
+ pn_vrrp_priority:
+ description:
+ - Specify the priority for the VRRP interface. This is a value between
+ 1 (lowest) and 255 (highest).
+ pn_vrrp_adv_int:
+ description:
+ - Specify a VRRP advertisement interval in milliseconds. The range is
+ from 30 to 40950 with a default value of 1000.
+ pn_l3port:
+ description:
+ - Specify a Layer 3 port for the interface.
+ pn_secondary_macs:
+ description:
+ - Specify a secondary MAC address for the interface.
+ pn_nic_str:
+ description:
+ - Specify the type of NIC. Used for vrouter-interface remove/modify.
+"""
+
+EXAMPLES = """
+- name: Add vrouter-interface
+ pn_vrouterif:
+ pn_cliusername: admin
+ pn_clipassword: admin
+ state: 'present'
+ pn_vrouter_name: 'ansible-vrouter'
+ pn_interface_ip: 101.101.101.2/24
+ pn_vlan: 101
+
+- name: Add VRRP..
+ pn_vrouterif:
+ pn_cliusername: admin
+ pn_clipassword: admin
+ state: 'present'
+ pn_vrouter_name: 'ansible-vrouter'
+ pn_interface_ip: 101.101.101.2/24
+ pn_vrrp_ip: 101.101.101.1/24
+ pn_vrrp_priority: 100
+ pn_vlan: 101
+
+- name: Remove vrouter-interface
+ pn_vrouterif:
+ pn_cliusername: admin
+ pn_clipassword: admin
+ state: 'absent'
+ pn_vrouter_name: 'ansible-vrouter'
+ pn_interface_ip: 101.101.101.2/24
+"""
+
+RETURN = """
+vrouterifcmd:
+ description: The CLI command run on the target node(s).
+stdout/msg:
+ description: The set of responses from the vrouterif command.
+ returned: on success
+ type: list
+stderr/msg:
+ description: The set of error responses from the vrouterif command.
+ returned: on error
+ type: str
+changed:
+ description: Indicates whether the CLI caused changes on the target.
+ returned: always
+ type: bool
+"""
+
+
+VROUTER_EXISTS = None
+INTERFACE_EXISTS = None
+NIC_EXISTS = None
+VRRP_EXISTS = None
+
+
+def pn_cli(module):
+ """
+ This method is to generate the cli portion to launch the Netvisor cli.
+ It parses the username, password, switch parameters from module.
+ :param module: The Ansible module to fetch username, password and switch
+ :return: returns the cli string for further processing
+ """
+ username = module.params['pn_cliusername']
+ password = module.params['pn_clipassword']
+ cliswitch = module.params['pn_cliswitch']
+
+ if username and password:
+ cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
+ else:
+ cli = '/usr/bin/cli --quiet '
+
+ if cliswitch == 'local':
+ cli += ' switch-local '
+ else:
+ cli += ' switch ' + cliswitch
+ return cli
+
+
+def check_cli(module, cli):
+ """
+ This method checks if vRouter exists on the target node.
+ This method also checks for idempotency using the vrouter-interface-show
+ command.
+ If the given vRouter exists, return VROUTER_EXISTS as True else False.
+
+ If an interface with the given ip exists on the given vRouter,
+ return INTERFACE_EXISTS as True else False. This is required for
+ vrouter-interface-add.
+
+ If nic_str exists on the given vRouter, return NIC_EXISTS as True else
+ False. This is required for vrouter-interface-remove.
+
+ :param module: The Ansible module to fetch input parameters
+ :param cli: The CLI string
+ :return Global Booleans: VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS
+ """
+ vrouter_name = module.params['pn_vrouter_name']
+ interface_ip = module.params['pn_interface_ip']
+ nic_str = module.params['pn_nic_str']
+
+ # Global flags
+ global VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS
+
+ # Check for vRouter
+ check_vrouter = cli + ' vrouter-show format name no-show-headers '
+ check_vrouter = shlex.split(check_vrouter)
+ out = module.run_command(check_vrouter)[1]
+ out = out.split()
+
+ if vrouter_name in out:
+ VROUTER_EXISTS = True
+ else:
+ VROUTER_EXISTS = False
+
+ if interface_ip:
+ # Check for interface and VRRP and fetch nic for VRRP
+ show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name
+ show += 'ip %s format ip,nic no-show-headers' % interface_ip
+ show = shlex.split(show)
+ out = module.run_command(show)[1]
+ if out:
+ INTERFACE_EXISTS = True
+ else:
+ INTERFACE_EXISTS = False
+
+ if nic_str:
+ # Check for nic
+ show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name
+ show += ' format nic no-show-headers'
+ show = shlex.split(show)
+ out = module.run_command(show)[1]
+ if nic_str in out:
+ NIC_EXISTS = True
+ else:
+ NIC_EXISTS = False
+
+
+def get_nic(module, cli):
+ """
+ This module checks if VRRP interface can be added. If No, return VRRP_EXISTS
+ as True.
+ If Yes, fetch the nic string from the primary interface and return nic and
+ VRRP_EXISTS as False.
+ :param module:
+ :param cli:
+ :return: nic, Global Boolean: VRRP_EXISTS
+ """
+ vrouter_name = module.params['pn_vrouter_name']
+ interface_ip = module.params['pn_interface_ip']
+
+ global VRRP_EXISTS
+
+ # Check for interface and VRRP and fetch nic for VRRP
+ show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name
+ show += 'ip %s format ip,nic no-show-headers' % interface_ip
+ show = shlex.split(show)
+ out = module.run_command(show)[1]
+ out = out.split()
+
+ if len(out) > 3:
+ VRRP_EXISTS = True
+ return None
+ else:
+ nic = out[2]
+ VRRP_EXISTS = False
+ return nic
+
+
+def run_cli(module, cli):
+ """
+ This method executes the cli command on the target node(s) and returns the
+ output. The module then exits based on the output.
+ :param cli: the complete cli string to be executed on the target node(s).
+ :param module: The Ansible module to fetch command
+ """
+ cliswitch = module.params['pn_cliswitch']
+ state = module.params['state']
+ command = get_command_from_state(state)
+
+ cmd = shlex.split(cli)
+
+ # 'out' contains the output
+ # 'err' contains the error messages
+ result, out, err = module.run_command(cmd)
+
+ print_cli = cli.split(cliswitch)[1]
+
+ # Response in JSON format
+ if result != 0:
+ module.exit_json(
+ command=print_cli,
+ stderr=err.strip(),
+ msg="%s operation failed" % command,
+ changed=False
+ )
+
+ if out:
+ module.exit_json(
+ command=print_cli,
+ stdout=out.strip(),
+ msg="%s operation completed" % command,
+ changed=True
+ )
+
+ else:
+ module.exit_json(
+ command=print_cli,
+ msg="%s operation completed" % command,
+ changed=True
+ )
+
+
+def get_command_from_state(state):
+ """
+ This method gets appropriate command name for the state specified. It
+ returns the command name for the specified state.
+ :param state: The state for which the respective command name is required.
+ """
+ command = None
+ if state == 'present':
+ command = 'vrouter-interface-add'
+ if state == 'absent':
+ command = 'vrouter-interface-remove'
+ if state == 'update':
+ command = 'vrouter-interface-modify'
+ return command
+
+
+def main():
+ """ This portion is for arguments parsing """
+ module = AnsibleModule(
+ argument_spec=dict(
+ pn_cliusername=dict(required=False, type='str'),
+ pn_clipassword=dict(required=False, type='str', no_log=True),
+ pn_cliswitch=dict(required=False, type='str', default='local'),
+            state=dict(required=True, type='str',
+                       choices=['present', 'absent', 'update']),
+ pn_vrouter_name=dict(required=True, type='str'),
+ pn_vlan=dict(type='int'),
+            pn_interface_ip=dict(type='str'),
+ pn_assignment=dict(type='str',
+ choices=['none', 'dhcp', 'dhcpv6', 'autov6']),
+ pn_vxlan=dict(type='int'),
+ pn_interface=dict(type='str', choices=['mgmt', 'data', 'span']),
+ pn_alias=dict(type='str'),
+ pn_exclusive=dict(type='bool'),
+ pn_nic_enable=dict(type='bool'),
+ pn_vrrp_id=dict(type='int'),
+ pn_vrrp_priority=dict(type='int'),
+ pn_vrrp_adv_int=dict(type='str'),
+ pn_l3port=dict(type='str'),
+ pn_secondary_macs=dict(type='str'),
+ pn_nic_str=dict(type='str')
+ ),
+ required_if=(
+ ["state", "present",
+ ["pn_vrouter_name", "pn_interface_ip"]],
+ ["state", "absent",
+ ["pn_vrouter_name", "pn_nic_str"]]
+ ),
+ )
+
+ # Accessing the arguments
+ state = module.params['state']
+ vrouter_name = module.params['pn_vrouter_name']
+ vlan = module.params['pn_vlan']
+ interface_ip = module.params['pn_interface_ip']
+ assignment = module.params['pn_assignment']
+ vxlan = module.params['pn_vxlan']
+ interface = module.params['pn_interface']
+ alias = module.params['pn_alias']
+ exclusive = module.params['pn_exclusive']
+ nic_enable = module.params['pn_nic_enable']
+ vrrp_id = module.params['pn_vrrp_id']
+ vrrp_priority = module.params['pn_vrrp_priority']
+ vrrp_adv_int = module.params['pn_vrrp_adv_int']
+ l3port = module.params['pn_l3port']
+ secondary_macs = module.params['pn_secondary_macs']
+ nic_str = module.params['pn_nic_str']
+
+ command = get_command_from_state(state)
+
+ # Building the CLI command string
+ cli = pn_cli(module)
+
+ check_cli(module, cli)
+ if command == 'vrouter-interface-add':
+ if VROUTER_EXISTS is False:
+ module.exit_json(
+ skipped=True,
+ msg='vRouter %s does not exist' % vrouter_name
+ )
+
+ if vrrp_id:
+ vrrp_primary = get_nic(module, cli)
+ if VRRP_EXISTS is True:
+ module.exit_json(
+ skipped=True,
+ msg=('VRRP interface on %s already exists. Check '
+ 'the IP addresses' % vrouter_name)
+ )
+ cli += ' %s vrouter-name %s ' % (command, vrouter_name)
+ cli += (' ip %s vrrp-primary %s vrrp-id %s '
+ % (interface_ip, vrrp_primary, str(vrrp_id)))
+ if vrrp_priority:
+ cli += ' vrrp-priority %s ' % str(vrrp_priority)
+ if vrrp_adv_int:
+ cli += ' vrrp-adv-int %s ' % vrrp_adv_int
+
+ else:
+ if INTERFACE_EXISTS is True:
+ module.exit_json(
+ skipped=True,
+ msg=('vRouter interface on %s already exists. Check the '
+ 'IP addresses' % vrouter_name)
+ )
+ cli += ' %s vrouter-name %s ' % (command, vrouter_name)
+ cli += ' ip %s ' % interface_ip
+
+ if vlan:
+ cli += ' vlan ' + str(vlan)
+
+ if l3port:
+ cli += ' l3-port ' + l3port
+
+ if assignment:
+ cli += ' assignment ' + assignment
+
+ if vxlan:
+ cli += ' vxlan ' + str(vxlan)
+
+ if interface:
+ cli += ' if ' + interface
+
+ if alias:
+ cli += ' alias-on ' + alias
+
+ if exclusive is True:
+ cli += ' exclusive '
+ if exclusive is False:
+ cli += ' no-exclusive '
+
+ if nic_enable is True:
+ cli += ' nic-enable '
+ if nic_enable is False:
+ cli += ' nic-disable '
+
+ if secondary_macs:
+ cli += ' secondary-macs ' + secondary_macs
+
+ if command == 'vrouter-interface-remove':
+ if VROUTER_EXISTS is False:
+ module.exit_json(
+ skipped=True,
+ msg='vRouter %s does not exist' % vrouter_name
+ )
+ if NIC_EXISTS is False:
+ module.exit_json(
+ skipped=True,
+ msg='vRouter interface with nic %s does not exist' % nic_str
+ )
+ cli += ' %s vrouter-name %s nic %s ' % (command, vrouter_name, nic_str)
+
+ run_cli(module, cli)
+# Ansible boiler-plate
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/network/netvisor/pn_vrouterlbif.py b/network/netvisor/pn_vrouterlbif.py
new file mode 100644
index 00000000000..e6fc928a205
--- /dev/null
+++ b/network/netvisor/pn_vrouterlbif.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+""" PN CLI vrouter-loopback-interface-add/remove """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import shlex
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: pn_vrouterlbif
+author: "Pluribus Networks (@amitsi)"
+version_added: "2.2"
+version: 1.0
+short_description: CLI command to add/remove vrouter-loopback-interface.
+description:
+ - Execute vrouter-loopback-interface-add, vrouter-loopback-interface-remove
+ commands.
+ - Each fabric, cluster, standalone switch, or virtual network (VNET) can
+ provide its tenants with a virtual router (vRouter) service that forwards
+ traffic between networks and implements Layer 3 protocols.
+options:
+ pn_cliusername:
+ description:
+ - Provide login username if user is not root.
+ required: False
+ pn_clipassword:
+ description:
+ - Provide login password if user is not root.
+ required: False
+ pn_cliswitch:
+ description:
+ - Target switch(es) to run the cli on.
+ required: False
+ state:
+ description:
+ - State the action to perform. Use 'present' to add vrouter loopback
+ interface and 'absent' to remove vrouter loopback interface.
+ required: True
+ choices: ['present', 'absent']
+ pn_vrouter_name:
+ description:
+ - Specify the name of the vRouter.
+ required: True
+ pn_index:
+ description:
+ - Specify the interface index from 1 to 255.
+ pn_interface_ip:
+ description:
+ - Specify the IP address.
+ required: True
+"""
+
+EXAMPLES = """
+- name: add vrouter-loopback-interface
+ pn_vrouterlbif:
+ state: 'present'
+ pn_vrouter_name: 'ansible-vrouter'
+ pn_interface_ip: '104.104.104.1'
+
+- name: remove vrouter-loopback-interface
+ pn_vrouterlbif:
+ state: 'absent'
+ pn_vrouter_name: 'ansible-vrouter'
+ pn_interface_ip: '104.104.104.1'
+"""
+
+RETURN = """
+command:
+ description: The CLI command run on the target node(s).
+stdout:
+ description: The set of responses from the vrouterlb command.
+ returned: always
+ type: list
+stderr:
+ description: The set of error responses from the vrouterlb command.
+ returned: on error
+ type: list
+changed:
+ description: Indicates whether the CLI caused changes on the target.
+ returned: always
+ type: bool
+"""
+
+
+VROUTER_EXISTS = None
+LB_INTERFACE_EXISTS = None
+# Index range
+MIN_INDEX = 1
+MAX_INDEX = 255
+
+
+def pn_cli(module):
+    """
+    This method is to generate the cli portion to launch the Netvisor cli.
+    It parses the username, password, switch parameters from module.
+    :param module: The Ansible module to fetch username, password and switch
+    :return: returns the cli string for further processing
+    """
+    username = module.params['pn_cliusername']
+    password = module.params['pn_clipassword']
+    cliswitch = module.params['pn_cliswitch']
+
+    # Only pass --user when both credentials were supplied; otherwise the
+    # CLI runs with the invoking user's identity.
+    if username and password:
+        cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
+    else:
+        cli = '/usr/bin/cli --quiet '
+
+    # 'local' (the module default) targets the switch we are logged into;
+    # any other value is forwarded verbatim as the target switch name.
+    if cliswitch == 'local':
+        cli += ' switch-local '
+    else:
+        cli += ' switch ' + cliswitch
+    return cli
+
+
+def check_cli(module, cli):
+    """
+    This method checks if vRouter exists on the target node.
+    This method also checks for idempotency using the
+    vrouter-loopback-interface-show command.
+    If the given vRouter exists, return VROUTER_EXISTS as True else False.
+    If a loopback interface with the given ip exists on the given vRouter,
+    return LB_INTERFACE_EXISTS as True else False.
+
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    :return Global Booleans: VROUTER_EXISTS, LB_INTERFACE_EXISTS
+    """
+    vrouter_name = module.params['pn_vrouter_name']
+    interface_ip = module.params['pn_interface_ip']
+
+    # Global flags
+    global VROUTER_EXISTS, LB_INTERFACE_EXISTS
+
+    # Check for vRouter
+    check_vrouter = cli + ' vrouter-show format name no-show-headers '
+    check_vrouter = shlex.split(check_vrouter)
+    # run_command returns (rc, stdout, stderr); keep stdout only.
+    out = module.run_command(check_vrouter)[1]
+    # Whitespace-split so the vRouter name is matched as a whole token.
+    out = out.split()
+
+    if vrouter_name in out:
+        VROUTER_EXISTS = True
+    else:
+        VROUTER_EXISTS = False
+
+    # Check for loopback interface
+    show = (cli + ' vrouter-loopback-interface-show vrouter-name %s format ip '
+            'no-show-headers' % vrouter_name)
+    show = shlex.split(show)
+    out = module.run_command(show)[1]
+    # Whitespace-split so the IP is matched as a whole token.
+    out = out.split()
+
+    if interface_ip in out:
+        LB_INTERFACE_EXISTS = True
+    else:
+        LB_INTERFACE_EXISTS = False
+
+
+def run_cli(module, cli):
+    """
+    This method executes the cli command on the target node(s) and returns the
+    output. The module then exits based on the output.
+    :param cli: the complete cli string to be executed on the target node(s).
+    :param module: The Ansible module to fetch command
+    """
+    cliswitch = module.params['pn_cliswitch']
+    state = module.params['state']
+    command = get_command_from_state(state)
+
+    cmd = shlex.split(cli)
+
+    # 'out' contains the output
+    # 'err' contains the error messages
+    result, out, err = module.run_command(cmd)
+
+    # Report back only the part of the command after the switch name so the
+    # username/password prefix built by pn_cli() is not echoed in results.
+    # NOTE(review): assumes cliswitch occurs exactly once in the command
+    # string; split would misbehave otherwise -- confirm against pn_cli().
+    print_cli = cli.split(cliswitch)[1]
+
+    # Response in JSON format
+    # NOTE(review): a non-zero rc exits via exit_json with changed=False
+    # rather than fail_json, so a failed CLI call does not fail the task --
+    # confirm this is the intended contract for these Netvisor modules.
+    if result != 0:
+        module.exit_json(
+            command=print_cli,
+            stderr=err.strip(),
+            msg="%s operation failed" % command,
+            changed=False
+        )
+
+    if out:
+        module.exit_json(
+            command=print_cli,
+            stdout=out.strip(),
+            msg="%s operation completed" % command,
+            changed=True
+        )
+
+    else:
+        module.exit_json(
+            command=print_cli,
+            msg="%s operation completed" % command,
+            changed=True
+        )
+
+
+def get_command_from_state(state):
+ """
+ This method gets appropriate command name for the state specified. It
+ returns the command name for the specified state.
+ :param state: The state for which the respective command name is required.
+ """
+ command = None
+ if state == 'present':
+ command = 'vrouter-loopback-interface-add'
+ if state == 'absent':
+ command = 'vrouter-loopback-interface-remove'
+ return command
+
+
+def main():
+    """ This portion is for arguments parsing """
+    module = AnsibleModule(
+        argument_spec=dict(
+            pn_cliusername=dict(required=False, type='str'),
+            pn_clipassword=dict(required=False, type='str', no_log=True),
+            pn_cliswitch=dict(required=False, type='str', default='local'),
+            state =dict(required=True, type='str',
+                        choices=['present', 'absent']),
+            pn_vrouter_name=dict(required=True, type='str'),
+            pn_interface_ip=dict(type='str'),
+            pn_index=dict(type='int')
+        ),
+        required_if=(
+            ["state", "present",
+             ["pn_vrouter_name", "pn_interface_ip"]],
+            ["state", "absent",
+             ["pn_vrouter_name", "pn_interface_ip"]]
+        )
+    )
+
+    # Accessing the arguments
+    state = module.params['state']
+    vrouter_name = module.params['pn_vrouter_name']
+    interface_ip = module.params['pn_interface_ip']
+    index = module.params['pn_index']
+
+    command = get_command_from_state(state)
+
+    # Building the CLI command string
+    cli = pn_cli(module)
+
+    # Validate the user-supplied index before converting it for the CLI.
+    if index:
+        if not MIN_INDEX <= index <= MAX_INDEX:
+            module.exit_json(
+                msg="Index must be between 1 and 255",
+                changed=False
+            )
+        index = str(index)
+
+    if command == 'vrouter-loopback-interface-remove':
+        # Idempotency: skip when the vRouter or the interface is absent.
+        check_cli(module, cli)
+        if VROUTER_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='vRouter %s does not exist' % vrouter_name
+            )
+        if LB_INTERFACE_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg=('Loopback interface with IP %s does not exist on %s'
+                     % (interface_ip, vrouter_name))
+            )
+        if not index:
+            # To remove loopback interface, we need the index.
+            # If index is not specified, get the Loopback interface index
+            # using the given interface ip.
+            get_index = cli
+            get_index += (' vrouter-loopback-interface-show vrouter-name %s ip '
+                          '%s ' % (vrouter_name, interface_ip))
+            get_index += 'format index no-show-headers'
+
+            get_index = shlex.split(get_index)
+            out = module.run_command(get_index)[1]
+            # NOTE(review): takes the second whitespace token of the show
+            # output; presumably the first column is the switch name --
+            # confirm the output format of this CLI.
+            index = out.split()[1]
+
+        cli += ' %s vrouter-name %s index %s' % (command, vrouter_name, index)
+
+    if command == 'vrouter-loopback-interface-add':
+        # Idempotency: skip when the interface already exists.
+        check_cli(module, cli)
+        if VROUTER_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg=('vRouter %s does not exist' % vrouter_name)
+            )
+        if LB_INTERFACE_EXISTS is True:
+            module.exit_json(
+                skipped=True,
+                msg=('Loopback interface with IP %s already exists on %s'
+                     % (interface_ip, vrouter_name))
+            )
+        cli += (' %s vrouter-name %s ip %s'
+                % (command, vrouter_name, interface_ip))
+        if index:
+            cli += ' index %s ' % index
+
+    run_cli(module, cli)
+
+# Ansible boiler-plate
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_template.py b/network/nxos/_nxos_template.py
similarity index 93%
rename from network/nxos/nxos_template.py
rename to network/nxos/_nxos_template.py
index c8cc638f189..a5b976aa4ce 100644
--- a/network/nxos/nxos_template.py
+++ b/network/nxos/_nxos_template.py
@@ -15,6 +15,11 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+
DOCUMENTATION = """
---
module: nxos_template
@@ -28,6 +33,7 @@
by evaluating the current running-config and only pushing configuration
commands that are not already configured. The config source can
be a set of commands or a template.
+deprecated: Deprecated in 2.2. Use nxos_config instead
extends_documentation_fragment: nxos
options:
src:
@@ -89,7 +95,7 @@
src: config.j2
force: yes
-- name: provide the base configuration for comparision
+- name: provide the base configuration for comparison
nxos_template:
src: candidate_config.txt
config: current_config.txt
@@ -104,12 +110,13 @@
responses:
description: The set of responses from issuing the commands on the device
- retured: when not check_mode
+ returned: when not check_mode
type: list
sample: ['...', '...']
"""
+import ansible.module_utils.nxos
from ansible.module_utils.netcfg import NetworkConfig, dumps
-from ansible.module_utils.nxos import NetworkModule, NetworkError
+from ansible.module_utils.network import NetworkModule
def get_config(module):
config = module.params['config'] or dict()
diff --git a/network/nxos/nxos_aaa_server.py b/network/nxos/nxos_aaa_server.py
new file mode 100644
index 00000000000..6b4f52ae4e3
--- /dev/null
+++ b/network/nxos/nxos_aaa_server.py
@@ -0,0 +1,573 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+
+module: nxos_aaa_server
+version_added: "2.2"
+short_description: Manages AAA server global configuration.
+description:
+ - Manages AAA server global configuration
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+notes:
+ - The server_type parameter is always required.
+ - If encrypt_type is not supplied, the global AAA server key will be
+ stored as encrypted (type 7).
+ - Changes to the global AAA server key with encrypt_type=0
+ are not idempotent.
+ - If global AAA server key is not found, it's shown as "unknown"
+ - state=default will set the supplied parameters to their default values.
+ The parameters that you want to default must also be set to default.
+ If global_key=default, the global key will be removed.
+options:
+ server_type:
+ description:
+ - The server type is either radius or tacacs.
+ required: true
+ choices: ['radius', 'tacacs']
+ global_key:
+ description:
+ - Global AAA shared secret.
+ required: false
+ default: null
+ encrypt_type:
+ description:
+ - The state of encryption applied to the entered global key.
+        0 clear text, 7 encrypted. Type-6 encryption is not supported.
+ required: false
+ default: null
+ choices: ['0', '7']
+ deadtime:
+ description:
+ - Duration for which a non-reachable AAA server is skipped,
+ in minutes. Range is 1-1440. Device default is 0.
+ required: false
+ default: null
+ server_timeout:
+ description:
+ - Global AAA server timeout period, in seconds. Range is 1-60.
+ Device default is 5.
+ required: false
+ default: null
+ directed_request:
+ description:
+ - Enables direct authentication requests to AAA server.
+ Device default is disabled.
+ required: false
+ default: null
+ choices: ['enabled', 'disabled']
+ state:
+ description:
+ - Manage the state of the resource.
+ required: true
+ default: present
+ choices: ['present','default']
+'''
+
+EXAMPLES = '''
+# Radius Server Basic settings
+ - name: "Radius Server Basic settings"
+ nxos_aaa_server:
+ server_type: radius
+ server_timeout: 9
+ deadtime: 20
+ directed_request: enabled
+        host: "{{ inventory_hostname }}"
+        username: "{{ un }}"
+        password: "{{ pwd }}"
+
+# Tacacs Server Basic settings
+ - name: "Tacacs Server Basic settings"
+ nxos_aaa_server:
+ server_type: tacacs
+ server_timeout: 8
+ deadtime: 19
+ directed_request: disabled
+        host: "{{ inventory_hostname }}"
+        username: "{{ un }}"
+        password: "{{ pwd }}"
+
+# Setting Global Key
+ - name: "AAA Server Global Key"
+ nxos_aaa_server:
+ server_type: radius
+ global_key: test_key
+        host: "{{ inventory_hostname }}"
+        username: "{{ un }}"
+        password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"deadtime": "22", "directed_request": "enabled",
+ "server_type": "radius", "server_timeout": "11"}
+existing:
+ description:
+ - k/v pairs of existing aaa server
+ type: dict
+ sample: {"deadtime": "0", "directed_request": "disabled",
+ "global_key": "unknown", "server_timeout": "5"}
+end_state:
+ description: k/v pairs of aaa params after module execution
+ returned: always
+ type: dict
+ sample: {"deadtime": "22", "directed_request": "enabled",
+ "global_key": "unknown", "server_timeout": "11"}
+state:
+ description: state as sent in from the playbook
+ returned: always
+ type: string
+ sample: "present"
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["radius-server deadtime 22", "radius-server timeout 11",
+ "radius-server directed-request"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+    """NetworkConfig subclass adding section expansion/lookup helpers and
+    an add() that inserts lines under (and creates) parent sections."""
+
+    def expand_section(self, configobj, S=None):
+        """Return configobj plus all of its descendants, depth-first,
+        skipping any child already collected (guards against cycles)."""
+        if S is None:
+            S = list()
+        S.append(configobj)
+        for child in configobj.children:
+            if child in S:
+                continue
+            self.expand_section(child, S)
+        return S
+
+    def get_object(self, path):
+        """Find the config item whose text is path[-1] and whose parent
+        texts match path[:-1]; returns None when no item matches."""
+        for item in self.items:
+            if item.text == path[-1]:
+                parents = [p.text for p in item.parents]
+                if parents == path[:-1]:
+                    return item
+
+    def to_block(self, section):
+        """Join the raw text of the given items into one newline block."""
+        return '\n'.join([item.raw for item in section])
+
+    def get_section(self, path):
+        """Return the section at *path* as a text block, or an empty list
+        when the path does not exist (note: list, not empty string)."""
+        try:
+            section = self.get_section_objects(path)
+            return self.to_block(section)
+        except ValueError:
+            return list()
+
+    def get_section_objects(self, path):
+        """Return the item at *path* and all its descendants.
+        :raises ValueError: when the path is not present in the config."""
+        if not isinstance(path, list):
+            path = [path]
+        obj = self.get_object(path)
+        if not obj:
+            raise ValueError('path does not exist in config')
+        return self.expand_section(obj)
+
+
+    def add(self, lines, parents=None):
+        """Add one or more lines of configuration, creating any missing
+        parent sections and indenting children by self.indent per level.
+        """
+
+        ancestors = list()
+        offset = 0
+        obj = None
+
+        ## global config command
+        if not parents:
+            for line in to_list(lines):
+                item = ConfigLine(line)
+                item.raw = line
+                # Deduplicate: only append lines not already present.
+                if item not in self.items:
+                    self.items.append(item)
+
+        else:
+            for index, p in enumerate(parents):
+                try:
+                    i = index + 1
+                    # Reuse the existing section when this parent exists.
+                    obj = self.get_section_objects(parents[:i])[0]
+                    ancestors.append(obj)
+
+                except ValueError:
+                    # add parent to config
+                    offset = index * self.indent
+                    obj = ConfigLine(p)
+                    obj.raw = p.rjust(len(p) + offset)
+                    if ancestors:
+                        obj.parents = list(ancestors)
+                        ancestors[-1].children.append(obj)
+                    self.items.append(obj)
+                    ancestors.append(obj)
+
+            # add child objects
+            for line in to_list(lines):
+                # check if child already exists
+                for child in ancestors[-1].children:
+                    if child.text == line:
+                        break
+                else:
+                    offset = len(parents) * self.indent
+                    item = ConfigLine(line)
+                    item.raw = line.rjust(len(line) + offset)
+                    item.parents = ancestors
+                    ancestors[-1].children.append(item)
+                    self.items.append(item)
+
+
+def get_network_module(**kwargs):
+    """Instantiate the network module via whichever factory is available:
+    the legacy get_module() entry point when it was imported, otherwise
+    the NetworkModule class (get_module unbound raises NameError)."""
+    try:
+        return get_module(**kwargs)
+    except NameError:
+        return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+    """Return the device configuration wrapped in CustomNetworkConfig,
+    preferring an explicit 'config' module parameter over fetching it.
+    NOTE(review): the include_defaults argument is never read; the value
+    comes from module.params['include_defaults'] instead -- confirm."""
+    config = module.params['config']
+    if not config:
+        # Fall back to module.config.get_config() when the module object
+        # does not provide a get_config() method (AttributeError).
+        try:
+            config = module.get_config()
+        except AttributeError:
+            defaults = module.params['include_defaults']
+            config = module.config.get_config(include_defaults=defaults)
+    return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+    """Diff *candidate* against the running config, push the missing
+    lines (unless in check mode) and return a result dict with the
+    'changed' flag and the list of 'updates' applied."""
+    config = get_config(module)
+
+    # Only lines absent from the running config are sent to the device.
+    commands = candidate.difference(config)
+    commands = [str(c).strip() for c in commands]
+
+    save_config = module.params['save']
+
+    result = dict(changed=False)
+
+    if commands:
+        if not module.check_mode:
+            # Support both module APIs: configure() and config().
+            try:
+                module.configure(commands)
+            except AttributeError:
+                module.config(commands)
+
+            if save_config:
+                # Persist to startup-config via whichever API exists.
+                try:
+                    module.config.save_config()
+                except AttributeError:
+                    module.execute(['copy running-config startup-config'])
+
+        # In check mode 'changed' is still reported, but nothing was sent.
+        result['changed'] = True
+        result['updates'] = commands
+
+    return result
+# END OF COMMON CODE
+
+
+def execute_config_command(commands, module):
+    """Send configuration commands to the device, failing the module on
+    shell errors. Falls back to the low-level cli interface (prepending
+    'configure', which mutates the caller's list) when module.configure()
+    is not available."""
+    try:
+        module.configure(commands)
+    except ShellError:
+        clie = get_exception()
+        module.fail_json(msg='Error sending CLI commands',
+                         error=str(clie), commands=commands)
+    except AttributeError:
+        # module.configure() missing: use module.cli and enter config
+        # mode explicitly.
+        try:
+            commands.insert(0, 'configure')
+            module.cli.add_commands(commands, output='config')
+            module.cli.run_commands()
+        except ShellError:
+            clie = get_exception()
+            module.fail_json(msg='Error sending CLI commands',
+                             error=str(clie), commands=commands)
+
+
+def execute_show(cmds, module, command_type=None):
+    """Run show commands and return the responses, failing the module on
+    shell errors. Uses module.execute() when available, otherwise the
+    low-level module.cli interface."""
+    # Translate nxapi command types to the cli transport's output formats.
+    command_type_map = {
+        'cli_show': 'json',
+        'cli_show_ascii': 'text'
+    }
+
+    try:
+        if command_type:
+            response = module.execute(cmds, command_type=command_type)
+        else:
+            response = module.execute(cmds)
+    except ShellError:
+        clie = get_exception()
+        module.fail_json(msg='Error sending {0}'.format(cmds),
+                         error=str(clie))
+    except AttributeError:
+        # module.execute() missing: replay through module.cli with the
+        # mapped output format (raw when no command_type was requested).
+        try:
+            if command_type:
+                command_type = command_type_map.get(command_type)
+                module.cli.add_commands(cmds, output=command_type)
+                response = module.cli.run_commands()
+            else:
+                module.cli.add_commands(cmds, raw=True)
+                response = module.cli.run_commands()
+        except ShellError:
+            clie = get_exception()
+            module.fail_json(msg='Error sending {0}'.format(cmds),
+                             error=str(clie))
+    return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+    """Run a single show command, passing command_type only for nxapi.
+    NOTE(review): 'body' is unbound for any transport other than
+    cli/nxapi -- presumably the argument spec restricts transport to
+    those two values; confirm."""
+    cmds = [command]
+    if module.params['transport'] == 'cli':
+        body = execute_show(cmds, module)
+    elif module.params['transport'] == 'nxapi':
+        body = execute_show(cmds, module, command_type=command_type)
+    return body
+
+
+def flatten_list(command_lists):
+ flat_command_list = []
+ for command in command_lists:
+ if isinstance(command, list):
+ flat_command_list.extend(command)
+ else:
+ flat_command_list.append(command)
+ return flat_command_list
+
+
+
+def get_aaa_server_info(server_type, module):
+ aaa_server_info = {}
+ server_command = 'show {0}-server'.format(server_type)
+ request_command = 'show {0}-server directed-request'.format(server_type)
+ global_key_command = 'show run | sec {0}'.format(server_type)
+ aaa_regex = '.*{0}-server\skey\s\d\s+(?P\S+).*'.format(server_type)
+
+ server_body = execute_show_command(
+ server_command, module, command_type='cli_show_ascii')[0]
+
+ split_server = server_body.splitlines()
+
+ for line in split_server:
+ if line.startswith('timeout'):
+ aaa_server_info['server_timeout'] = line.split(':')[1]
+
+ elif line.startswith('deadtime'):
+ aaa_server_info['deadtime'] = line.split(':')[1]
+
+ request_body = execute_show_command(
+ request_command, module, command_type='cli_show_ascii')[0]
+ aaa_server_info['directed_request'] = request_body.replace('\n', '')
+
+ key_body = execute_show_command(
+ global_key_command, module, command_type='cli_show_ascii')[0]
+
+ try:
+ match_global_key = re.match(aaa_regex, key_body, re.DOTALL)
+ group_key = match_global_key.groupdict()
+ aaa_server_info['global_key'] = group_key["key"].replace('\"', '')
+ except (AttributeError, TypeError):
+ aaa_server_info['global_key'] = 'unknown'
+
+ return aaa_server_info
+
+
+def set_aaa_server_global_key(encrypt_type, key, server_type):
+ if not encrypt_type:
+ encrypt_type = ''
+ return '{0}-server key {1} {2}'.format(
+ server_type, encrypt_type, key)
+
+
+def config_aaa_server(params, server_type):
+ cmds = []
+
+ deadtime = params.get('deadtime')
+ server_timeout = params.get('server_timeout')
+ directed_request = params.get('directed_request')
+ encrypt_type = params.get('encrypt_type', '7')
+ global_key = params.get('global_key')
+
+ if deadtime is not None:
+ cmds.append('{0}-server deadtime {1}'.format(server_type, deadtime))
+
+ if server_timeout is not None:
+ cmds.append('{0}-server timeout {1}'.format(server_type, server_timeout))
+
+ if directed_request is not None:
+ if directed_request == 'enabled':
+ cmds.append('{0}-server directed-request'.format(server_type))
+ elif directed_request == 'disabled':
+ cmds.append('no {0}-server directed-request'.format(server_type))
+
+ if global_key is not None:
+ cmds.append('{0}-server key {1} {2}'.format(server_type, encrypt_type,
+ global_key))
+
+ return cmds
+
+
+def default_aaa_server(existing, params, server_type):
+ cmds = []
+
+ deadtime = params.get('deadtime')
+ server_timeout = params.get('server_timeout')
+ directed_request = params.get('directed_request')
+ global_key = params.get('global_key')
+ existing_key = existing.get('global_key')
+
+ if deadtime is not None:
+ cmds.append('no {0}-server deadtime 1'.format(server_type))
+
+ if server_timeout is not None:
+ cmds.append('no {0}-server timeout 1'.format(server_type))
+
+ if directed_request is not None:
+ cmds.append('no {0}-server directed-request'.format(server_type))
+
+ if global_key is not None and existing_key is not None:
+ cmds.append('no {0}-server key 7 {1}'.format(server_type, existing_key))
+
+ return cmds
+
+
+def main():
+ argument_spec = dict(
+ server_type=dict(type='str',
+ choices=['radius', 'tacacs'], required=True),
+ global_key=dict(type='str'),
+ encrypt_type=dict(type='str', choices=['0', '7']),
+ deadtime=dict(type='str'),
+ server_timeout=dict(type='str'),
+ directed_request=dict(type='str',
+ choices=['enabled', 'disabled', 'default']),
+ state=dict(choices=['default', 'present'], default='present'),
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ server_type = module.params['server_type']
+ global_key = module.params['global_key']
+ encrypt_type = module.params['encrypt_type']
+ deadtime = module.params['deadtime']
+ server_timeout = module.params['server_timeout']
+ directed_request = module.params['directed_request']
+ state = module.params['state']
+
+ if encrypt_type and not global_key:
+ module.fail_json(msg='encrypt_type must be used with global_key.')
+
+ args = dict(server_type=server_type, global_key=global_key,
+ encrypt_type=encrypt_type, deadtime=deadtime,
+ server_timeout=server_timeout, directed_request=directed_request)
+
+ changed = False
+ proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
+
+ existing = get_aaa_server_info(server_type, module)
+ end_state = existing
+
+ commands = []
+ if state == 'present':
+ if deadtime:
+ try:
+ if int(deadtime) < 0 or int(deadtime) > 1440:
+ raise ValueError
+ except ValueError:
+ module.fail_json(
+ msg='deadtime must be an integer between 0 and 1440')
+
+ if server_timeout:
+ try:
+ if int(server_timeout) < 1 or int(server_timeout) > 60:
+ raise ValueError
+ except ValueError:
+ module.fail_json(
+ msg='server_timeout must be an integer between 1 and 60')
+
+ delta = dict(set(proposed.iteritems()).difference(
+ existing.iteritems()))
+ if delta:
+ command = config_aaa_server(delta, server_type)
+ if command:
+ commands.append(command)
+
+ elif state == 'default':
+ for key, value in proposed.iteritems():
+ if key != 'server_type' and value != 'default':
+ module.fail_json(
+ msg='Parameters must be set to "default"'
+ 'when state=default')
+ command = default_aaa_server(existing, proposed, server_type)
+ if command:
+ commands.append(command)
+
+ cmds = flatten_list(commands)
+ if cmds:
+ if module.check_mode:
+ module.exit_json(changed=True, commands=cmds)
+ else:
+ changed = True
+ execute_config_command(cmds, module)
+ end_state = get_aaa_server_info(server_type, module)
+ if 'configure' in cmds:
+ cmds.pop(0)
+
+ results = {}
+ results['proposed'] = proposed
+ results['existing'] = existing
+ results['updates'] = cmds
+ results['changed'] = changed
+ results['end_state'] = end_state
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_aaa_server_host.py b/network/nxos/nxos_aaa_server_host.py
new file mode 100644
index 00000000000..aef2af144f2
--- /dev/null
+++ b/network/nxos/nxos_aaa_server_host.py
@@ -0,0 +1,582 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_aaa_server_host
+version_added: "2.2"
+short_description: Manages AAA server host-specific configuration.
+description:
+ - Manages AAA server host-specific configuration.
+extends_documentation_fragment: nxos
+author: Jason Edelman (@jedelman8)
+notes:
+ - Changes to the AAA server host key (shared secret) are not idempotent.
+    - C(state=absent) removes the whole host configuration.
+options:
+ server_type:
+ description:
+ - The server type is either radius or tacacs.
+ required: true
+ choices: ['radius', 'tacacs']
+ address:
+ description:
+ - Address or name of the radius or tacacs host.
+ required: true
+ key:
+ description:
+ - Shared secret for the specified host.
+ required: false
+ default: null
+ encrypt_type:
+ description:
+ - The state of encryption applied to the entered key.
+          0 for clear text, 7 for encrypted. Type-6 encryption is
+ not supported.
+ required: false
+ default: null
+ choices: ['0', '7']
+ host_timeout:
+ description:
+ - Timeout period for specified host, in seconds. Range is 1-60.
+ required: false
+ default: null
+ auth_port:
+ description:
+ - Alternate UDP port for RADIUS authentication.
+ required: false
+ default: null
+ acct_port:
+ description:
+ - Alternate UDP port for RADIUS accounting.
+ required: false
+ default: null
+ tacacs_port:
+ description:
+ - Alternate TCP port TACACS Server.
+ required: false
+ default: null
+ state:
+ description:
+ - Manage the state of the resource.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+EXAMPLES = '''
+# Radius Server Host Basic settings
+ - name: "Radius Server Host Basic settings"
+ nxos_aaa_server_host:
+ state: present
+ server_type: radius
+ address: 1.2.3.4
+ acct_port: 2084
+ host_timeout: 10
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# Radius Server Host Key Configuration
+ - name: "Radius Server Host Key Configuration"
+ nxos_aaa_server_host:
+ state: present
+ server_type: radius
+ address: 1.2.3.4
+ key: hello
+ encrypt_type: 7
+        host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# TACACS Server Host Configuration
+ - name: "Tacacs Server Host Configuration"
+ nxos_aaa_server_host:
+ state: present
+ server_type: tacacs
+ tacacs_port: 89
+ host_timeout: 10
+ address: 5.6.7.8
+        host: "{{ inventory_hostname }}"
+        username: "{{ un }}"
+        password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"address": "1.2.3.4", "auth_port": "2084",
+ "host_timeout": "10", "server_type": "radius"}
+existing:
+    description:
+        - k/v pairs of existing configuration
+    returned: always
+    type: dict
+    sample: {}
+end_state:
+ description: k/v pairs of configuration after module execution
+ returned: always
+ type: dict
+ sample: {"address": "1.2.3.4", "auth_port": "2084",
+ "host_timeout": "10", "server_type": "radius"}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["radius-server host 1.2.3.4 auth-port 2084 timeout 10"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+    # Normalize a value into a list: lists/tuples are copied,
+    # a scalar is wrapped, and None becomes an empty list.
+    if isinstance(val, (list, tuple)):
+        return list(val)
+    elif val is not None:
+        return [val]
+    else:
+        return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+    # Extends NetworkConfig with section-aware lookup/expansion helpers
+    # used by the migrated nxos modules.
+
+    def expand_section(self, configobj, S=None):
+        # Depth-first collect configobj and all of its children into S.
+        if S is None:
+            S = list()
+        S.append(configobj)
+        for child in configobj.children:
+            if child in S:
+                continue
+            self.expand_section(child, S)
+        return S
+
+    def get_object(self, path):
+        # Find the ConfigLine whose text and full parent chain match `path`;
+        # returns None when no such line exists.
+        for item in self.items:
+            if item.text == path[-1]:
+                parents = [p.text for p in item.parents]
+                if parents == path[:-1]:
+                    return item
+
+    def to_block(self, section):
+        # Join raw config lines of a section into one newline-separated block.
+        return '\n'.join([item.raw for item in section])
+
+    def get_section(self, path):
+        # Return the section at `path` as text, or an empty list when absent.
+        try:
+            section = self.get_section_objects(path)
+            return self.to_block(section)
+        except ValueError:
+            return list()
+
+    def get_section_objects(self, path):
+        # Return the ConfigLine objects of the section at `path`.
+        # Raises ValueError when the path is not present in the config.
+        if not isinstance(path, list):
+            path = [path]
+        obj = self.get_object(path)
+        if not obj:
+            raise ValueError('path does not exist in config')
+        return self.expand_section(obj)
+
+
+    def add(self, lines, parents=None):
+        """Adds one or more lines of configuration, creating any missing
+        parent sections along the way.
+        """
+
+        ancestors = list()
+        offset = 0
+        obj = None
+
+        ## global config command (no parent section)
+        if not parents:
+            for line in to_list(lines):
+                item = ConfigLine(line)
+                item.raw = line
+                if item not in self.items:
+                    self.items.append(item)
+
+        else:
+            for index, p in enumerate(parents):
+                try:
+                    i = index + 1
+                    obj = self.get_section_objects(parents[:i])[0]
+                    ancestors.append(obj)
+
+                except ValueError:
+                    # add parent to config
+                    offset = index * self.indent
+                    obj = ConfigLine(p)
+                    obj.raw = p.rjust(len(p) + offset)
+                    if ancestors:
+                        obj.parents = list(ancestors)
+                        ancestors[-1].children.append(obj)
+                    self.items.append(obj)
+                    ancestors.append(obj)
+
+            # add child objects
+            for line in to_list(lines):
+                # check if child already exists
+                for child in ancestors[-1].children:
+                    if child.text == line:
+                        break
+                else:
+                    offset = len(parents) * self.indent
+                    item = ConfigLine(line)
+                    item.raw = line.rjust(len(line) + offset)
+                    item.parents = ancestors
+                    ancestors[-1].children.append(item)
+                    self.items.append(item)
+
+
+def get_network_module(**kwargs):
+    # Prefer the legacy get_module() factory; fall back to NetworkModule
+    # when only the newer module_utils API is importable.
+    try:
+        return get_module(**kwargs)
+    except NameError:
+        return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+    # Return the device running-config wrapped in CustomNetworkConfig.
+    # A user-supplied 'config' parameter short-circuits the device fetch.
+    config = module.params['config']
+    if not config:
+        try:
+            config = module.get_config()
+        except AttributeError:
+            # newer module_utils API exposes get_config via module.config
+            defaults = module.params['include_defaults']
+            config = module.config.get_config(include_defaults=defaults)
+    return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+    # Push the delta between `candidate` and the running-config.
+    # Returns a dict with 'changed' and, when commands were sent, 'updates'.
+    config = get_config(module)
+
+    commands = candidate.difference(config)
+    commands = [str(c).strip() for c in commands]
+
+    save_config = module.params['save']
+
+    result = dict(changed=False)
+
+    if commands:
+        if not module.check_mode:
+            try:
+                module.configure(commands)
+            except AttributeError:
+                # newer module_utils API
+                module.config(commands)
+
+            if save_config:
+                try:
+                    module.config.save_config()
+                except AttributeError:
+                    module.execute(['copy running-config startup-config'])
+
+        # check_mode still reports what would have changed
+        result['changed'] = True
+        result['updates'] = commands
+
+    return result
+# END OF COMMON CODE
+
+
+def execute_config_command(commands, module):
+    # Send config-mode commands to the device; fail the module on ShellError.
+    try:
+        module.configure(commands)
+    except ShellError:
+        clie = get_exception()
+        module.fail_json(msg='Error sending CLI commands',
+                         error=str(clie), commands=commands)
+    except AttributeError:
+        # newer module_utils API: queue commands through the cli proxy.
+        # NOTE: 'configure' is prepended in place, so the caller's list
+        # is mutated.
+        try:
+            commands.insert(0, 'configure')
+            module.cli.add_commands(commands, output='config')
+            module.cli.run_commands()
+        except ShellError:
+            clie = get_exception()
+            module.fail_json(msg='Error sending CLI commands',
+                             error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh(command, response, module):
+    """Get response for when transport=cli. This is kind of a hack and mainly
+    needed because these modules were originally written for NX-API. And
+    not every command supports "| json" when using cli/ssh. As such, we assume
+    if | json returns an XML string, it is a valid command, but that the
+    resource doesn't exist yet. Instead, the output will be a raw string
+    when issuing commands containing 'show run'.
+    """
+    if 'xml' in response[0] or response[0] == '\n':
+        # valid command but resource not configured yet
+        body = []
+    elif 'show run' in command:
+        # 'show run' output stays a raw string
+        body = response
+    else:
+        try:
+            if isinstance(response[0], str):
+                body = [json.loads(response[0])]
+            else:
+                body = response
+        except ValueError:
+            module.fail_json(msg='Command does not support JSON output',
+                             command=command)
+    return body
+
+
+def execute_show(cmds, module, command_type=None):
+    # Run show commands via whichever transport API the module offers,
+    # failing the module on ShellError.
+    # Map legacy NX-API command types to the cli proxy output formats.
+    command_type_map = {
+        'cli_show': 'json',
+        'cli_show_ascii': 'text'
+    }
+
+    try:
+        if command_type:
+            response = module.execute(cmds, command_type=command_type)
+        else:
+            response = module.execute(cmds)
+    except ShellError:
+        clie = get_exception()
+        module.fail_json(msg='Error sending {0}'.format(cmds),
+                         error=str(clie))
+    except AttributeError:
+        # newer module_utils API: use the cli proxy instead of execute()
+        try:
+            if command_type:
+                command_type = command_type_map.get(command_type)
+                module.cli.add_commands(cmds, output=command_type)
+                response = module.cli.run_commands()
+            else:
+                module.cli.add_commands(cmds)
+                response = module.cli.run_commands()
+        except ShellError:
+            clie = get_exception()
+            module.fail_json(msg='Error sending {0}'.format(cmds),
+                             error=str(clie))
+    return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+    # Dispatch a show command over cli or nxapi transport and return the
+    # parsed body. Over cli, '| json' is appended except for 'show run'.
+    if module.params['transport'] == 'cli':
+        if 'show run' not in command:
+            command += ' | json'
+        cmds = [command]
+        response = execute_show(cmds, module)
+        body = get_cli_body_ssh(command, response, module)
+    elif module.params['transport'] == 'nxapi':
+        cmds = [command]
+        body = execute_show(cmds, module, command_type=command_type)
+
+    return body
+
+
+def flatten_list(command_lists):
+    # Flatten one level of nesting: [[a, b], c] -> [a, b, c].
+    flat_command_list = []
+    for command in command_lists:
+        if isinstance(command, list):
+            flat_command_list.extend(command)
+        else:
+            flat_command_list.append(command)
+    return flat_command_list
+
+
+def _match_dict(match_list, key_map):
+    # Convert re.findall() alternation-group tuples into a flat dict.
+    # Each non-empty group looks like 'keyword value'; the keyword is
+    # renamed through key_map and the value keeps its quotes stripped.
+    no_blanks = []
+    match_dict = {}
+
+    for match_set in match_list:
+        # drop the empty groups produced by the alternation pattern
+        match_set = tuple(v for v in match_set if v)
+        no_blanks.append(match_set)
+
+    for info in no_blanks:
+        words = info[0].strip().split()
+        length = len(words)
+        alt_key = key_map.get(words[0])
+        first = alt_key or words[0]
+        last = words[length - 1]
+        match_dict[first] = last.replace('\"', '')
+
+    return match_dict
+
+
+def get_aaa_host_info(module, server_type, address):
+    # Scrape 'show run' for the host-specific AAA server configuration and
+    # return it as a dict keyed by module parameter names; {} when absent.
+    aaa_host_info = {}
+    command = 'show run | inc {0}-server.host.{1}'.format(server_type, address)
+
+    body = execute_show_command(command, module, command_type='cli_show_ascii')
+
+    if body:
+        try:
+            # each alternation group matches one optional host setting
+            pattern = ('(acct-port \d+)|(timeout \d+)|(auth-port \d+)|'
+                       '(key 7 "\w+")|( port \d+)')
+            raw_match = re.findall(pattern, body[0])
+            aaa_host_info = _match_dict(raw_match, {'acct-port': 'acct_port',
+                                                    'auth-port': 'auth_port',
+                                                    'port': 'tacacs_port',
+                                                    'timeout': 'host_timeout'})
+            if aaa_host_info:
+                aaa_host_info['server_type'] = server_type
+                aaa_host_info['address'] = address
+        except TypeError:
+            # body was not string-like; treat as not configured
+            return {}
+    else:
+        return {}
+
+    return aaa_host_info
+
+
+def config_aaa_host(server_type, address, params, clear=False):
+    # Build the CLI commands for one AAA server host.
+    # When clear=True the host entry is removed first so stale options
+    # do not linger on the device.
+    cmds = []
+
+    if clear:
+        cmds.append('no {0}-server host {1}'.format(server_type, address))
+
+    cmd_str = '{0}-server host {1}'.format(server_type, address)
+
+    key = params.get('key')
+    enc_type = params.get('encrypt_type', '')
+    host_timeout = params.get('host_timeout')
+    auth_port = params.get('auth_port')
+    acct_port = params.get('acct_port')
+    port = params.get('tacacs_port')
+
+    # append only the options the caller supplied
+    if auth_port:
+        cmd_str += ' auth-port {0}'.format(auth_port)
+    if acct_port:
+        cmd_str += ' acct-port {0}'.format(acct_port)
+    if port:
+        cmd_str += ' port {0}'.format(port)
+    if host_timeout:
+        cmd_str += ' timeout {0}'.format(host_timeout)
+    if key:
+        # the shared secret is configured with its own command
+        cmds.append('{0}-server host {1} key {2} {3}'.format(server_type,
+                                                             address,
+                                                             enc_type, key))
+
+    cmds.append(cmd_str)
+    return cmds
+
+
+def main():
+ argument_spec = dict(
+ server_type=dict(choices=['radius', 'tacacs'], required=True),
+ address=dict(type='str', required=True),
+ key=dict(type='str'),
+ encrypt_type=dict(type='str', choices=['0', '7']),
+ host_timeout=dict(type='str'),
+ auth_port=dict(type='str'),
+ acct_port=dict(type='str'),
+ tacacs_port=dict(type='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ server_type = module.params['server_type']
+ address = module.params['address']
+ key = module.params['key']
+ encrypt_type = module.params['encrypt_type']
+ host_timeout = module.params['host_timeout']
+ auth_port = module.params['auth_port']
+ acct_port = module.params['acct_port']
+ tacacs_port = module.params['tacacs_port']
+ state = module.params['state']
+
+ args = dict(server_type=server_type, address=address, key=key,
+ encrypt_type=encrypt_type, host_timeout=host_timeout,
+ auth_port=auth_port, acct_port=acct_port,
+ tacacs_port=tacacs_port)
+
+ proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
+ changed = False
+
+ if encrypt_type and not key:
+ module.fail_json(msg='encrypt_type must be used with key')
+
+ if tacacs_port and server_type != 'tacacs':
+ module.fail_json(
+ msg='tacacs_port can only be used with server_type=tacacs')
+
+ if (auth_port or acct_port) and server_type != 'radius':
+ module.fail_json(msg='auth_port and acct_port can only be used'
+ 'when server_type=radius')
+
+
+ existing = get_aaa_host_info(module, server_type, address)
+ end_state = existing
+
+ commands = []
+ if state == 'present':
+ host_timeout = proposed.get('host_timeout')
+ if host_timeout:
+ try:
+ if int(host_timeout) < 1 or int(host_timeout) > 60:
+ raise ValueError
+ except ValueError:
+ module.fail_json(
+ msg='host_timeout must be an integer between 1 and 60')
+
+ delta = dict(
+ set(proposed.iteritems()).difference(existing.iteritems()))
+ if delta:
+ union = existing.copy()
+ union.update(delta)
+ command = config_aaa_host(server_type, address, union)
+ if command:
+ commands.append(command)
+
+ elif state == 'absent':
+ intersect = dict(
+ set(proposed.iteritems()).intersection(existing.iteritems()))
+ if intersect.get('address') and intersect.get('server_type'):
+ command = 'no {0}-server host {1}'.format(
+ intersect.get('server_type'), intersect.get('address'))
+ commands.append(command)
+
+ cmds = flatten_list(commands)
+ if cmds:
+ if module.check_mode:
+ module.exit_json(changed=True, commands=cmds)
+ else:
+ changed = True
+ execute_config_command(cmds, module)
+ end_state = get_aaa_host_info(module, server_type, address)
+
+ results = {}
+ results['proposed'] = proposed
+ results['existing'] = existing
+ results['updates'] = cmds
+ results['changed'] = changed
+ results['end_state'] = end_state
+
+ module.exit_json(**results)
+
+
+# Standard Ansible module entry point.
+if __name__ == '__main__':
+    main()
diff --git a/network/nxos/nxos_acl.py b/network/nxos/nxos_acl.py
new file mode 100644
index 00000000000..63762f97615
--- /dev/null
+++ b/network/nxos/nxos_acl.py
@@ -0,0 +1,816 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_acl
+version_added: "2.2"
+short_description: Manages access list entries for ACLs.
+description:
+ - Manages access list entries for ACLs.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - C(state=absent) removes the ACE if it exists.
+    - C(state=delete_acl) deletes the ACL if it exists.
+ - For idempotency, use port numbers for the src/dest port
+ params like I(src_port1) and names for the well defined protocols
+ for the I(proto) param.
+ - Although this module is idempotent in that if the ace as presented in
+ the task is identical to the one on the switch, no changes will be made.
+ If there is any difference, what is in Ansible will be pushed (configured
+      options will be overridden). This is to improve security, but at the
+      same time remember that the existing ACE is removed and then re-added,
+      so if there is a change, the new ACE will contain exactly the
+      parameters you are sending to the module.
+options:
+ seq:
+ description:
+ - Sequence number of the entry (ACE).
+ required: false
+ default: null
+ name:
+ description:
+ - Case sensitive name of the access list (ACL).
+ required: true
+ action:
+ description:
+ - Action of the ACE.
+ required: false
+ default: null
+ choices: ['permit', 'deny', 'remark']
+ remark:
+ description:
+ - If action is set to remark, this is the description.
+ required: false
+ default: null
+ proto:
+ description:
+ - Port number or protocol (as supported by the switch).
+ required: false
+ default: null
+ src:
+ description:
+ - Source ip and mask using IP/MASK notation and
+ supports keyword 'any'.
+ required: false
+ default: null
+ src_port_op:
+ description:
+ - Source port operands such as eq, neq, gt, lt, range.
+ required: false
+ default: null
+ choices: ['any', 'eq', 'gt', 'lt', 'neq', 'range']
+ src_port1:
+ description:
+ - Port/protocol and also first (lower) port when using range
+ operand.
+ required: false
+ default: null
+ src_port2:
+ description:
+ - Second (end) port when using range operand.
+ required: false
+ default: null
+ dest:
+ description:
+ - Destination ip and mask using IP/MASK notation and supports the
+ keyword 'any'.
+ required: false
+ default: null
+ dest_port_op:
+ description:
+ - Destination port operands such as eq, neq, gt, lt, range.
+ required: false
+ default: null
+ choices: ['any', 'eq', 'gt', 'lt', 'neq', 'range']
+ dest_port1:
+ description:
+ - Port/protocol and also first (lower) port when using range
+ operand.
+ required: false
+ default: null
+ dest_port2:
+ description:
+ - Second (end) port when using range operand.
+ required: false
+ default: null
+ log:
+ description:
+ - Log matches against this entry.
+ required: false
+ default: null
+ choices: ['enable']
+ urg:
+ description:
+ - Match on the URG bit.
+ required: false
+ default: null
+ choices: ['enable']
+ ack:
+ description:
+ - Match on the ACK bit.
+ required: false
+ default: null
+ choices: ['enable']
+ psh:
+ description:
+ - Match on the PSH bit.
+ required: false
+ default: null
+ choices: ['enable']
+ rst:
+ description:
+ - Match on the RST bit.
+ required: false
+ default: null
+ choices: ['enable']
+ syn:
+ description:
+ - Match on the SYN bit.
+ required: false
+ default: null
+ choices: ['enable']
+ fin:
+ description:
+ - Match on the FIN bit.
+ required: false
+ default: null
+ choices: ['enable']
+ established:
+ description:
+ - Match established connections.
+ required: false
+ default: null
+ choices: ['enable']
+ fragments:
+ description:
+ - Check non-initial fragments.
+ required: false
+ default: null
+ choices: ['enable']
+ time-range:
+ description:
+ - Name of time-range to apply.
+ required: false
+ default: null
+ precedence:
+ description:
+ - Match packets with given precedence.
+ required: false
+ default: null
+ choices: ['critical', 'flash', 'flash-override', 'immediate',
+ 'internet', 'network', 'priority', 'routine']
+ dscp:
+ description:
+ - Match packets with given dscp value.
+ required: false
+ default: null
+ choices: ['af11', 'af12', 'af13', 'af21', 'af22', 'af23','af31','af32',
+ 'af33', 'af41', 'af42', 'af43', 'cs1', 'cs2', 'cs3', 'cs4',
+ 'cs5', 'cs6', 'cs7', 'default', 'ef']
+ state:
+ description:
+ - Specify desired state of the resource.
+ required: false
+ default: present
+ choices: ['present','absent','delete_acl']
+'''
+
+EXAMPLES = '''
+
+# configure ACL ANSIBLE
+- nxos_acl:
+ name: ANSIBLE
+ seq: 10
+ action: permit
+ proto: tcp
+ src: 1.1.1.1/24
+ dest: any
+ state: present
+ provider: "{{ nxos_provider }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module.
+ returned: always
+ type: dict
+ sample: {"action": "permit", "dest": "any", "name": "ANSIBLE",
+ "proto": "tcp", "seq": "10", "src": "1.1.1.1/24"}
+existing:
+ description: k/v pairs of existing ACL entries.
+ returned: always
+ type: dict
+ sample: {}
+end_state:
+ description: k/v pairs of ACL entries after module execution.
+ returned: always
+ type: dict
+ sample: {"action": "permit", "dest": "any", "name": "ANSIBLE",
+ "proto": "tcp", "seq": "10", "src": "1.1.1.1/24"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["ip access-list ANSIBLE", "10 permit tcp 1.1.1.1/24 any"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import collections
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+    # Normalize a value into a list: lists/tuples are copied,
+    # a scalar is wrapped, and None becomes an empty list.
+    if isinstance(val, (list, tuple)):
+        return list(val)
+    elif val is not None:
+        return [val]
+    else:
+        return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+    # Extends NetworkConfig with section-aware lookup/expansion helpers
+    # used by the migrated nxos modules.
+
+    def expand_section(self, configobj, S=None):
+        # Depth-first collect configobj and all of its children into S.
+        if S is None:
+            S = list()
+        S.append(configobj)
+        for child in configobj.children:
+            if child in S:
+                continue
+            self.expand_section(child, S)
+        return S
+
+    def get_object(self, path):
+        # Find the ConfigLine whose text and full parent chain match `path`;
+        # returns None when no such line exists.
+        for item in self.items:
+            if item.text == path[-1]:
+                parents = [p.text for p in item.parents]
+                if parents == path[:-1]:
+                    return item
+
+    def to_block(self, section):
+        # Join raw config lines of a section into one newline-separated block.
+        return '\n'.join([item.raw for item in section])
+
+    def get_section(self, path):
+        # Return the section at `path` as text, or an empty list when absent.
+        try:
+            section = self.get_section_objects(path)
+            return self.to_block(section)
+        except ValueError:
+            return list()
+
+    def get_section_objects(self, path):
+        # Return the ConfigLine objects of the section at `path`.
+        # Raises ValueError when the path is not present in the config.
+        if not isinstance(path, list):
+            path = [path]
+        obj = self.get_object(path)
+        if not obj:
+            raise ValueError('path does not exist in config')
+        return self.expand_section(obj)
+
+
+    def add(self, lines, parents=None):
+        """Adds one or more lines of configuration, creating any missing
+        parent sections along the way.
+        """
+
+        ancestors = list()
+        offset = 0
+        obj = None
+
+        ## global config command (no parent section)
+        if not parents:
+            for line in to_list(lines):
+                item = ConfigLine(line)
+                item.raw = line
+                if item not in self.items:
+                    self.items.append(item)
+
+        else:
+            for index, p in enumerate(parents):
+                try:
+                    i = index + 1
+                    obj = self.get_section_objects(parents[:i])[0]
+                    ancestors.append(obj)
+
+                except ValueError:
+                    # add parent to config
+                    offset = index * self.indent
+                    obj = ConfigLine(p)
+                    obj.raw = p.rjust(len(p) + offset)
+                    if ancestors:
+                        obj.parents = list(ancestors)
+                        ancestors[-1].children.append(obj)
+                    self.items.append(obj)
+                    ancestors.append(obj)
+
+            # add child objects
+            for line in to_list(lines):
+                # check if child already exists
+                for child in ancestors[-1].children:
+                    if child.text == line:
+                        break
+                else:
+                    offset = len(parents) * self.indent
+                    item = ConfigLine(line)
+                    item.raw = line.rjust(len(line) + offset)
+                    item.parents = ancestors
+                    ancestors[-1].children.append(item)
+                    self.items.append(item)
+
+
+def get_network_module(**kwargs):
+    # Prefer the legacy get_module() factory; fall back to NetworkModule
+    # when only the newer module_utils API is importable.
+    try:
+        return get_module(**kwargs)
+    except NameError:
+        return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+    # Return the device running-config wrapped in CustomNetworkConfig.
+    # A user-supplied 'config' parameter short-circuits the device fetch.
+    config = module.params['config']
+    if not config:
+        try:
+            config = module.get_config()
+        except AttributeError:
+            # newer module_utils API exposes get_config via module.config
+            defaults = module.params['include_defaults']
+            config = module.config.get_config(include_defaults=defaults)
+    return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+    # Push the delta between `candidate` and the running-config.
+    # Returns a dict with 'changed' and, when commands were sent, 'updates'.
+    config = get_config(module)
+
+    commands = candidate.difference(config)
+    commands = [str(c).strip() for c in commands]
+
+    save_config = module.params['save']
+
+    result = dict(changed=False)
+
+    if commands:
+        if not module.check_mode:
+            try:
+                module.configure(commands)
+            except AttributeError:
+                # newer module_utils API
+                module.config(commands)
+
+            if save_config:
+                try:
+                    module.config.save_config()
+                except AttributeError:
+                    module.execute(['copy running-config startup-config'])
+
+        # check_mode still reports what would have changed
+        result['changed'] = True
+        result['updates'] = commands
+
+    return result
+# END OF COMMON CODE
+
+
+def get_cli_body_ssh(command, response, module):
+    """Get response for when transport=cli. This is kind of a hack and mainly
+    needed because these modules were originally written for NX-API. And
+    not every command supports "| json" when using cli/ssh. As such, we assume
+    if | json returns an XML string, it is a valid command, but that the
+    resource doesn't exist yet. Instead, we assume if '^' is found in response,
+    it is an invalid command.
+    """
+    if 'xml' in response[0]:
+        # valid command but resource not configured yet
+        body = []
+    elif '^' in response[0]:
+        # device echoed an error marker; hand back the raw response
+        body = response
+    else:
+        try:
+            body = [json.loads(response[0])]
+        except ValueError:
+            module.fail_json(msg='Command does not support JSON output',
+                             command=command)
+    return body
+
+
+def execute_show(cmds, module, command_type=None):
+    # Run show commands via whichever transport API the module offers,
+    # failing the module on ShellError.
+    # Map legacy NX-API command types to the cli proxy output formats.
+    command_type_map = {
+        'cli_show': 'json',
+        'cli_show_ascii': 'text'
+    }
+
+    try:
+        if command_type:
+            response = module.execute(cmds, command_type=command_type)
+        else:
+            response = module.execute(cmds)
+    except ShellError:
+        clie = get_exception()
+        module.fail_json(msg='Error sending {0}'.format(cmds),
+                         error=str(clie))
+    except AttributeError:
+        # newer module_utils API: use the cli proxy instead of execute()
+        try:
+            if command_type:
+                command_type = command_type_map.get(command_type)
+                module.cli.add_commands(cmds, output=command_type)
+                response = module.cli.run_commands()
+            else:
+                module.cli.add_commands(cmds, raw=True)
+                response = module.cli.run_commands()
+        except ShellError:
+            clie = get_exception()
+            module.fail_json(msg='Error sending {0}'.format(cmds),
+                             error=str(clie))
+    return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+    # Dispatch a show command over cli or nxapi transport and return the
+    # parsed body; cli transport always appends '| json'.
+    if module.params['transport'] == 'cli':
+        command += ' | json'
+        cmds = [command]
+        response = execute_show(cmds, module)
+        body = get_cli_body_ssh(command, response, module)
+    elif module.params['transport'] == 'nxapi':
+        cmds = [command]
+        body = execute_show(cmds, module, command_type=command_type)
+
+    return body
+
+
+def get_acl(module, acl_name, seq_number):
+    # Parse 'show ip access-list' output for one ACL.
+    # Returns a 3-tuple:
+    #   saveme  - dict for the ACE whose seq matches `seq_number` ({} if none)
+    #   new_acl - list of all ACE dicts in the ACL
+    #   seqs    - list of all sequence numbers (as strings) in the ACL
+    command = 'show ip access-list'
+    new_acl = []
+    saveme = {}
+    seqs = []
+    acl_body = {}
+
+    body = execute_show_command(command, module)[0]
+    all_acl_body = body['TABLE_ip_ipv6_mac']['ROW_ip_ipv6_mac']
+
+    for acl in all_acl_body:
+        if acl.get('acl_name') == acl_name:
+            acl_body = acl
+
+    try:
+        acl_entries = acl_body['TABLE_seqno']['ROW_seqno']
+        acl_name = acl_body.get('acl_name')
+    except KeyError:  # could be raised if no ACEs are configured for an ACL
+        return saveme, [{'acl': 'no_entries'}], seqs
+
+    # a single ACE comes back as a dict rather than a list
+    if isinstance(acl_entries, dict):
+        acl_entries = [acl_entries]
+
+    for each in acl_entries:
+        temp = collections.OrderedDict()
+        keep = {}
+        temp['name'] = acl_name
+        temp['seq'] = str(each.get('seqno'))
+        temp['options'] = {}
+        remark = each.get('remark')
+        if remark:
+            temp['remark'] = remark
+            temp['action'] = 'remark'
+        else:
+            temp['action'] = each.get('permitdeny')
+            temp['proto'] = each.get('proto', each.get('proto_str', each.get('ip')))
+            temp['src'] = each.get('src_any', each.get('src_ip_prefix'))
+            temp['src_port_op'] = each.get('src_port_op')
+            temp['src_port1'] = each.get('src_port1_num')
+            temp['src_port2'] = each.get('src_port2_num')
+            temp['dest'] = each.get('dest_any', each.get('dest_ip_prefix'))
+            temp['dest_port_op'] = each.get('dest_port_op')
+            temp['dest_port1'] = each.get('dest_port1_num')
+            temp['dest_port2'] = each.get('dest_port2_num')
+
+        # ACE options (flags, dscp, precedence, ...)
+        options = collections.OrderedDict()
+        options['log'] = each.get('log')
+        options['urg'] = each.get('urg')
+        options['ack'] = each.get('ack')
+        options['psh'] = each.get('psh')
+        options['rst'] = each.get('rst')
+        options['syn'] = each.get('syn')
+        options['fin'] = each.get('fin')
+        options['established'] = each.get('established')
+        options['dscp'] = each.get('dscp_str')
+        options['precedence'] = each.get('precedence_str')
+        options['fragments'] = each.get('fragments')
+        options['time_range'] = each.get('timerange')
+
+        # drop options the device did not report
+        options_no_null = {}
+        for key, value in options.iteritems():
+            if value is not None:
+                options_no_null[key] = value
+
+        keep['options'] = options_no_null
+
+        for key, value in temp.iteritems():
+            if value:
+                keep[key] = value
+        # ensure options is always in the dict
+        if keep.get('options', 'DNE') == 'DNE':
+            keep['options'] = {}
+
+        if keep.get('seq') == seq_number:
+            saveme = dict(keep)
+
+        seqs.append(str(keep.get('seq')))
+        new_acl.append(keep)
+
+    return saveme, new_acl, seqs
+
+
+def _acl_operand(operand, srcp1, sprcp2):
+ sub_entry = ' ' + operand
+
+ if operand == 'range':
+ sub_entry += ' ' + srcp1 + ' ' + sprcp2
+ else:
+ sub_entry += ' ' + srcp1
+
+ return sub_entry
+
+
+def config_core_acl(proposed):
+    # Build the core ACE command string (without options) from the
+    # proposed parameters; remark entries use '<seq> remark <text>'.
+    seq = proposed.get('seq')
+    action = proposed.get('action')
+    remark = proposed.get('remark')
+    proto = proposed.get('proto')
+    src = proposed.get('src')
+    src_port_op = proposed.get('src_port_op')
+    src_port1 = proposed.get('src_port1')
+    src_port2 = proposed.get('src_port2')
+
+    dest = proposed.get('dest')
+    dest_port_op = proposed.get('dest_port_op')
+    dest_port1 = proposed.get('dest_port1')
+    dest_port2 = proposed.get('dest_port2')
+
+    ace_start_entries = [action, proto, src]
+    if not remark:
+        ace = seq + ' ' + ' '.join(ace_start_entries)
+        if src_port_op:
+            ace += _acl_operand(src_port_op, src_port1, src_port2)
+        ace += ' ' + dest
+        if dest_port_op:
+            ace += _acl_operand(dest_port_op, dest_port1, dest_port2)
+    else:
+        ace = seq + ' remark ' + remark
+
+    return ace
+
+
+def config_acl_options(options):
+ ENABLE_ONLY = ['psh', 'urg', 'log', 'ack', 'syn',
+ 'established', 'rst', 'fin', 'fragments',
+ 'log']
+
+ OTHER = ['dscp', 'precedence', 'time-range']
+ # packet-length is the only option not currently supported
+
+ if options.get('time_range'):
+ options['time-range'] = options.get('time_range')
+ options.pop('time_range')
+
+ command = ''
+ for option, value in options.iteritems():
+ if option in ENABLE_ONLY:
+ if value == 'enable':
+ command += ' ' + option
+ elif option in OTHER:
+ command += ' ' + option + ' ' + value
+ if command:
+ command = command.strip()
+ return command
+
+
+def flatten_list(command_lists):
+    # Flatten one level of nesting: [[a, b], c] -> [a, b, c].
+    flat_command_list = []
+    for command in command_lists:
+        if isinstance(command, list):
+            flat_command_list.extend(command)
+        else:
+            flat_command_list.append(command)
+    return flat_command_list
+
+
+def execute_config_command(commands, module):
+    # Send config-mode commands to the device; fail the module on ShellError.
+    try:
+        module.configure(commands)
+    except ShellError:
+        clie = get_exception()
+        module.fail_json(msg='Error sending CLI commands',
+                         error=str(clie), commands=commands)
+    except AttributeError:
+        # newer module_utils API: queue commands through the cli proxy.
+        # NOTE: 'configure' is prepended in place, so the caller's list
+        # is mutated.
+        try:
+            commands.insert(0, 'configure')
+            module.cli.add_commands(commands, output='config')
+            module.cli.run_commands()
+        except ShellError:
+            clie = get_exception()
+            module.fail_json(msg='Error sending CLI commands',
+                             error=str(clie), commands=commands)
+
+
def main():
    """Entry point for nxos_acl: idempotently manage a single ACL entry.

    Fixes vs. original: removed a duplicated ``seq = module.params['seq']``
    assignment, corrected the remark validation message (it said "when
    state is action"), and replaced py2-only iteritems() with items().
    """
    argument_spec = dict(
        seq=dict(required=False, type='str'),
        name=dict(required=True, type='str'),
        action=dict(required=False, choices=['remark', 'permit', 'deny']),
        remark=dict(required=False, type='str'),
        proto=dict(required=False, type='str'),
        src=dict(required=False, type='str'),
        src_port_op=dict(required=False),
        src_port1=dict(required=False, type='str'),
        src_port2=dict(required=False, type='str'),
        dest=dict(required=False, type='str'),
        dest_port_op=dict(required=False),
        dest_port1=dict(required=False, type='str'),
        dest_port2=dict(required=False, type='str'),
        log=dict(required=False, choices=['enable']),
        urg=dict(required=False, choices=['enable']),
        ack=dict(required=False, choices=['enable']),
        psh=dict(required=False, choices=['enable']),
        rst=dict(required=False, choices=['enable']),
        syn=dict(required=False, choices=['enable']),
        fragments=dict(required=False, choices=['enable']),
        fin=dict(required=False, choices=['enable']),
        established=dict(required=False, choices=['enable']),
        time_range=dict(required=False),
        precedence=dict(required=False, choices=['critical', 'flash',
                                                 'flash-override',
                                                 'immediate', 'internet',
                                                 'network', 'priority',
                                                 'routine']),
        dscp=dict(required=False, choices=['af11', 'af12', 'af13', 'af21',
                                           'af22', 'af23', 'af31', 'af32',
                                           'af33', 'af41', 'af42', 'af43',
                                           'cs1', 'cs2', 'cs3', 'cs4',
                                           'cs5', 'cs6', 'cs7', 'default',
                                           'ef']),
        state=dict(choices=['absent', 'present', 'delete_acl'],
                   default='present'),
        protocol=dict(choices=['http', 'https'], default='http'),
        host=dict(required=True),
        username=dict(type='str'),
        password=dict(no_log=True, type='str'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    state = module.params['state']
    action = module.params['action']
    remark = module.params['remark']
    dscp = module.params['dscp']
    precedence = module.params['precedence']
    seq = module.params['seq']
    name = module.params['name']

    if action == 'remark' and not remark:
        module.fail_json(msg='when action is remark, remark param is also '
                             'required')

    REQUIRED = ['seq', 'name', 'action', 'proto', 'src', 'dest']
    ABSENT = ['name', 'seq']
    if state == 'present':
        # A remark-only entry just needs seq/action/remark; any other ACE
        # needs the full core parameter set.
        if not (action and remark and seq):
            for each in REQUIRED:
                if module.params[each] is None:
                    module.fail_json(msg="req'd params when state is present:",
                                     params=REQUIRED)
    elif state == 'absent':
        for each in ABSENT:
            if module.params[each] is None:
                module.fail_json(msg='require params when state is absent',
                                 params=ABSENT)
    elif state == 'delete_acl':
        if module.params['name'] is None:
            module.fail_json(msg="param name req'd when state is delete_acl")

    if dscp and precedence:
        module.fail_json(msg='only one of the params dscp/precedence '
                             'are allowed')

    OPTIONS_NAMES = ['log', 'urg', 'ack', 'psh', 'rst', 'syn', 'fin',
                     'established', 'dscp', 'precedence', 'fragments',
                     'time_range']

    CORE = ['seq', 'name', 'action', 'proto', 'src', 'src_port_op',
            'src_port1', 'src_port2', 'dest', 'dest_port_op',
            'dest_port1', 'dest_port2', 'remark']

    # items() (not py2-only iteritems()) keeps this working on py2 and py3.
    proposed_core = dict((param, value) for (param, value) in
                         module.params.items()
                         if param in CORE and value is not None)

    proposed_options = dict((param, value) for (param, value) in
                            module.params.items()
                            if param in OPTIONS_NAMES and value is not None)
    proposed = {}
    proposed.update(proposed_core)
    proposed.update(proposed_options)

    existing_options = {}

    # getting existing existing_core=dict, acl=list, seq=list
    existing_core, acl, seqs = get_acl(module, name, seq)
    if existing_core:
        existing_options = existing_core.get('options')
        existing_core.pop('options')

    end_state = acl
    commands = []
    changed = False
    delta_core = {}
    delta_options = {}

    if not existing_core.get('remark'):
        # Set difference of (key, value) pairs: everything proposed that is
        # not already configured.
        delta_core = dict(
            set(proposed_core.items()).difference(
                existing_core.items())
        )
        delta_options = dict(
            set(proposed_options.items()).difference(
                existing_options.items())
        )

    if state == 'present':
        if delta_core or delta_options:
            if existing_core:  # if the ace exists already
                commands.append(['no {0}'.format(seq)])
            if delta_options:
                myacl_str = config_core_acl(proposed_core)
                myacl_str += ' ' + config_acl_options(proposed_options)
            else:
                myacl_str = config_core_acl(proposed_core)
            command = [myacl_str]
            commands.append(command)
    elif state == 'absent':
        if existing_core:
            commands.append(['no {0}'.format(seq)])
    elif state == 'delete_acl':
        if acl[0].get('acl') != 'no_entries':
            commands.append(['no ip access-list {0}'.format(name)])

    results = {}
    cmds = []
    if commands:
        preface = []
        if state in ['present', 'absent']:
            # ACE changes must be issued inside the ACL context.
            preface = ['ip access-list {0}'.format(name)]
            commands.insert(0, preface)

        cmds = flatten_list(commands)
        if module.check_mode:
            module.exit_json(changed=True, commands=cmds)
        else:
            execute_config_command(cmds, module)
            changed = True
            # Re-read device state so end_state reflects the change.
            new_existing_core, end_state, seqs = get_acl(module, name, seq)
            # Drop the 'configure' prefix the cli fallback inserted in place.
            if 'configure' in cmds:
                cmds.pop(0)

    results['proposed'] = proposed
    results['existing'] = existing_core
    results['changed'] = changed
    results['updates'] = cmds
    results['end_state'] = end_state

    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_acl_interface.py b/network/nxos/nxos_acl_interface.py
new file mode 100644
index 00000000000..e9dbcb3fe07
--- /dev/null
+++ b/network/nxos/nxos_acl_interface.py
@@ -0,0 +1,543 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_acl_interface
+version_added: "2.2"
+short_description: Manages applying ACLs to interfaces.
+description:
+ - Manages applying ACLs to interfaces.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+options:
+ name:
+ description:
+ - Case sensitive name of the access list (ACL).
+ required: true
+ interface:
+ description:
+ - Full name of interface, e.g. I(Ethernet1/1).
+ required: true
+ direction:
+ description:
+ - Direction ACL to be applied in on the interface.
+ required: true
+ choices: ['ingress', 'egress']
+ state:
+ description:
+ - Specify desired state of the resource.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+- name: apply egress acl to ethernet1/41
+ nxos_acl_interface:
+ name: ANSIBLE
+ interface: ethernet1/41
+ direction: egress
+ state: present
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"direction": "egress", "interface": "ethernet1/41",
+ "name": "ANSIBLE"}
+existing:
+ description: k/v pairs of existing ACL applied to the interface
+ returned: always
+ type: dict
+ sample: {}
+end_state:
+ description: k/v pairs of interface ACL after module execution
+ returned: always
+ type: dict
+ sample: {"direction": "egress", "interface": "ethernet1/41",
+ "name": "ANSIBLE"}
+acl_applied_to:
+ description: list of interfaces the ACL is applied to
+ returned: always
+ type: list
+ sample: [{"acl_type": "Router ACL", "direction": "egress",
+ "interface": "Ethernet1/41", "name": "ANSIBLE"}]
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["interface ethernet1/41", "ip access-group ANSIBLE out"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import collections
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Normalize *val* to a list: pass lists/tuples through as a new list,
    map None to [], and wrap any other value as a single-element list."""
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass with section-expansion helpers.

    Part of the pre-2.3 'common code for migration' shim copied into each
    migrated nxos module.
    """

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all of its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Return the item whose text and full ancestry match *path*,
        # or None when no such line exists.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of config objects back into raw config text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Return the section at *path* as a text block, or [] when absent.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Expand the object at *path* (raises ValueError when missing).
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)

    def add(self, lines, parents=None):
        """Add one or more lines of configuration, creating any missing
        parent sections along the way."""

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the module object via the legacy get_module() factory when that
    import succeeded, otherwise via the newer NetworkModule class (the
    try/except import at the top of the file controls which name exists)."""
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device running-config wrapped in a CustomNetworkConfig.

    Uses the user-supplied 'config' module param when given; otherwise pulls
    the config from the device, handling both old and new module APIs.

    NOTE(review): the include_defaults *argument* is never read; the
    'include_defaults' module param is used instead -- confirm intended.
    """
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
+
def load_config(module, candidate):
    """Diff *candidate* config against the device and push the delta.

    Returns a result dict with 'changed' and, when commands were generated,
    'updates'.  Honors check mode (commands reported but not sent) and the
    'save' module param (copy run start after a change).
    """
    config = get_config(module)

    # Only the candidate lines not already on the device are sent.
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            try:
                module.configure(commands)
            except AttributeError:
                # newer connection objects expose config() instead
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        # 'changed' is reported even in check mode.
        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
+
+
def get_cli_body_ssh(command, response, module):
    """Normalize raw CLI (ssh) output into the body format NX-API returns.

    These modules were written for NX-API and not every command supports
    '| json' over cli/ssh.  An XML string in the reply is taken to mean
    "valid command, resource not configured yet" (empty body); a '^' marker
    or a 'summary' command is passed through untouched; anything else must
    parse as JSON or the module fails.
    """
    raw = response[0]
    if 'xml' in raw:
        return []
    if '^' in raw or 'summary' in command:
        return response
    try:
        return [json.loads(raw)]
    except ValueError:
        module.fail_json(msg='Command does not support JSON output',
                         command=command)
+
+
def execute_show(cmds, module, command_type=None):
    """Run show commands via the legacy module.execute API, falling back to
    the module.cli API (AttributeError) on newer connections.

    command_type is the legacy NX-API type ('cli_show'/'cli_show_ascii');
    for the cli fallback it is mapped to the 'json'/'text' output names.
    """
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        try:
            if command_type:
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                # NOTE(review): identical to the branch above except that
                # command_type is still None here, so output=None is passed.
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
+
+
def execute_show_command(command, module, command_type='cli_show'):
    """Run *command* and return the parsed body for cli or nxapi transport.

    For cli transport, non-'summary' commands get ' | json' appended and
    the raw reply is normalized by get_cli_body_ssh().

    NOTE(review): body is left unbound if transport is neither 'cli' nor
    'nxapi' -- presumably the shared nxos argument spec prevents that.
    """
    if module.params['transport'] == 'cli':
        if 'summary' not in command:
            command += ' | json'
        cmds = [command]
        response = execute_show(cmds, module)
        body = get_cli_body_ssh(command, response, module)
    elif module.params['transport'] == 'nxapi':
        cmds = [command]
        body = execute_show(cmds, module, command_type=command_type)

    return body
+
+
def get_acl_interface(module, acl):
    """Return one dict per interface/direction the ACL *acl* is applied to.

    Parses 'show ip access-list summary' ascii output and returns entries
    shaped like {'name': acl, 'interface': ..., 'direction': ...,
    'acl_type': ...}.

    Fix vs. original: the regex group names had been stripped (e.g.
    '(?P\\S+)'), which raises an re error at compile/match time; the names
    are restored to match the groupdict() lookups below.
    """
    command = 'show ip access-list summary'
    name_regex = r'.*IPV4\s+ACL\s+(?P<name>\S+).*'
    interface_regex = (r'.*\s+(?P<interface>\w+(\d+)?\/?(\d+)?)\s-\s'
                       r'(?P<direction>\w+)\s+\W(?P<acl_type>\w+\s\w+)\W.*')
    acl_list = []

    body = execute_show_command(command, module, command_type='cli_show_ascii')
    # Each chunk after this split describes one ACL and its bindings.
    body_split = body[0].split('Active on interfaces:')

    for each_acl in body_split:
        temp = {}
        try:
            match_name = re.match(name_regex, each_acl, re.DOTALL)
            name = match_name.groupdict()['name']
        except AttributeError:
            # No ACL header in this chunk.
            name = ''

        temp['interfaces'] = []
        for line in each_acl.split('\n'):
            intf_temp = {}
            try:
                match_interface = re.match(interface_regex, line, re.DOTALL)
                interface_dict = match_interface.groupdict()
                interface = interface_dict['interface']
                direction = interface_dict['direction']
                acl_type = interface_dict['acl_type']
            except AttributeError:
                # Line does not describe an interface binding.
                interface = ''
                direction = ''
                acl_type = ''

            if interface:
                intf_temp['interface'] = interface
            if acl_type:
                intf_temp['acl_type'] = acl_type
            if direction:
                intf_temp['direction'] = direction
            if intf_temp:
                temp['interfaces'].append(intf_temp)
        if name:
            temp['name'] = name

        if temp:
            acl_list.append(temp)

    # Flatten to a per-interface list for the requested ACL only.
    existing_no_null = []
    for each in acl_list:
        if each.get('name') == acl:
            for interface in each.get('interfaces'):
                new_temp = {'name': acl}
                new_temp.update(interface)
                existing_no_null.append(new_temp)
    return existing_no_null
+
+
def other_existing_acl(get_existing, interface, direction):
    """Filter *get_existing* down to the entries bound to *interface*.

    Returns (entries_on_interface, entry_matching_direction); the second
    item is {} when no entry on that interface matches *direction*.  A list
    is needed because the same ACL can be applied in both directions.
    """
    matches = [entry for entry in (get_existing or [])
               if entry.get('interface').lower() == interface]

    directed = {}
    for entry in matches:
        if entry.get('direction') == direction:
            directed = entry

    return matches, directed
+
+
def apply_acl(proposed):
    """Build the config commands that bind the proposed ACL to an interface."""
    direction = proposed.get('direction')
    if direction == 'egress':
        keyword = 'out'
    elif direction == 'ingress':
        keyword = 'in'
    return ['interface ' + proposed.get('interface'),
            'ip access-group {0} {1}'.format(proposed.get('name'), keyword)]
+
+
def remove_acl(proposed):
    """Build the config commands that unbind the proposed ACL from an
    interface."""
    direction = proposed.get('direction')
    if direction == 'egress':
        keyword = 'out'
    elif direction == 'ingress':
        keyword = 'in'
    return ['interface ' + proposed.get('interface'),
            'no ip access-group {0} {1}'.format(proposed.get('name'), keyword)]
+
+
def flatten_list(command_lists):
    """Flatten a mixed list of commands and command lists by one level."""
    flat = []
    for item in command_lists:
        if isinstance(item, list):
            flat += item
        else:
            flat.append(item)
    return flat
+
+
def execute_config_command(commands, module):
    """Send *commands* to the device in config mode, failing the module on
    error.

    Legacy ``module.configure`` is tried first; an AttributeError means the
    newer ``module.cli`` interface is in use, in which case 'configure' is
    prepended to *commands* in place (the caller's list is mutated and the
    caller strips the prefix back out of the reported updates).
    """
    try:
        module.configure(commands)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(clie), commands=commands)
    except AttributeError:
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(clie), commands=commands)
+
+
def main():
    """Entry point for nxos_acl_interface: apply or remove an ACL binding
    on an interface, idempotently."""
    argument_spec = dict(
        # NOTE(review): DOCUMENTATION declares name as required: true, but
        # the spec marks it optional -- confirm which is intended.
        name=dict(required=False, type='str'),
        interface=dict(required=True),
        direction=dict(required=True, choices=['egress', 'ingress']),
        state=dict(choices=['absent', 'present'],
                   default='present'),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    state = module.params['state']
    name = module.params['name']
    # Lower-cased so comparisons against parsed 'show' output match.
    interface = module.params['interface'].lower()
    direction = module.params['direction'].lower()

    proposed = dict(name=name, interface=interface, direction=direction)

    # includes all interfaces the ACL is applied to (list)
    get_existing = get_acl_interface(module, name)

    # interface_acls = includes entries of this ACL on the interface (list)
    # this_dir_acl_intf = dict - not null if it already exists
    interfaces_acls, existing = other_existing_acl(
        get_existing, interface, direction)

    end_state = existing
    end_state_acls = get_existing
    changed = False

    cmds = []
    commands = []
    if state == 'present':
        if not existing:
            command = apply_acl(proposed)
            if command:
                commands.append(command)

    elif state == 'absent':
        if existing:
            command = remove_acl(proposed)
            if command:
                commands.append(command)

    if commands:
        cmds = flatten_list(commands)
        if cmds:
            if module.check_mode:
                module.exit_json(changed=True, commands=cmds)
            else:
                execute_config_command(cmds, module)
                changed = True
                # Re-read device state so end_state reflects the change.
                end_state_acls = get_acl_interface(module, name)
                interfaces_acls, this_dir_acl_intf = other_existing_acl(
                    end_state_acls, interface, direction)
                end_state = this_dir_acl_intf
                # Drop the 'configure' prefix the cli fallback inserted.
                if 'configure' in cmds:
                    cmds.pop(0)
    else:
        cmds = []

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['updates'] = cmds
    results['changed'] = changed
    results['end_state'] = end_state
    results['acl_applied_to'] = end_state_acls

    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_bgp.py b/network/nxos/nxos_bgp.py
new file mode 100644
index 00000000000..a6306fe74cc
--- /dev/null
+++ b/network/nxos/nxos_bgp.py
@@ -0,0 +1,1012 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_bgp
+version_added: "2.2"
+short_description: Manages BGP configuration.
+description:
+ - Manages BGP configurations on NX-OS switches.
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - C(state=absent) removes the whole BGP ASN configuration when
+ C(vrf=default) or the whole VRF instance within the BGP process when
+ using a different VRF.
+ - Default when supported restores params default value.
+ - Configuring global params is only permitted if C(vrf=default).
+options:
+ asn:
+ description:
+ - BGP autonomous system number. Valid values are String,
+ Integer in ASPLAIN or ASDOT notation.
+ required: true
+ vrf:
+ description:
+ - Name of the VRF. The name 'default' is a valid VRF representing
+ the global BGP.
+ required: false
+ default: null
+ bestpath_always_compare_med:
+ description:
+ - Enable/Disable MED comparison on paths from different
+ autonomous systems.
+ required: false
+ choices: ['true','false']
+ default: null
+ bestpath_aspath_multipath_relax:
+ description:
+ - Enable/Disable load sharing across the providers with
+ different (but equal-length) AS paths.
+ required: false
+ choices: ['true','false']
+ default: null
+ bestpath_compare_routerid:
+ description:
+ - Enable/Disable comparison of router IDs for identical eBGP paths.
+ required: false
+ choices: ['true','false']
+ default: null
+ bestpath_cost_community_ignore:
+ description:
+ - Enable/Disable Ignores the cost community for BGP best-path
+ calculations.
+ required: false
+ choices: ['true','false']
+ default: null
+ bestpath_med_confed:
+ description:
+ - Enable/Disable enforcement of bestpath to do a MED comparison
+ only between paths originated within a confederation.
+ required: false
+ choices: ['true','false']
+ default: null
+ bestpath_med_missing_as_worst:
+ description:
+ - Enable/Disable assigns the value of infinity to received
+ routes that do not carry the MED attribute, making these routes
+ the least desirable.
+ required: false
+ choices: ['true','false']
+ default: null
+ bestpath_med_non_deterministic:
+ description:
+ - Enable/Disable deterministic selection of the best MED path
+ from among the paths from the same autonomous system.
+ required: false
+ choices: ['true','false']
+ default: null
+ cluster_id:
+ description:
+ - Route Reflector Cluster-ID.
+ required: false
+ default: null
+ confederation_id:
+ description:
+ - Routing domain confederation AS.
+ required: false
+ default: null
+ confederation_peers:
+ description:
+ - AS confederation parameters.
+ required: false
+ default: null
+ disable_policy_batching:
+ description:
+ - Enable/Disable the batching evaluation of prefix advertisement
+ to all peers.
+ required: false
+ choices: ['true','false']
+ default: null
+ disable_policy_batching_ipv4_prefix_list:
+ description:
+ - Enable/Disable the batching evaluation of prefix advertisements
+ to all peers with prefix list.
+ required: false
+ default: null
+ disable_policy_batching_ipv6_prefix_list:
+ description:
+ - Enable/Disable the batching evaluation of prefix advertisements
+ to all peers with prefix list.
+ required: false
+ enforce_first_as:
+ description:
+ - Enable/Disable enforces the neighbor autonomous system to be
+ the first AS number listed in the AS path attribute for eBGP.
+ On NX-OS, this property is only supported in the
+ global BGP context.
+ required: false
+ choices: ['true','false']
+ default: null
+ event_history_cli:
+ description:
+ - Enable/Disable cli event history buffer.
+ required: false
+ choices: ['size_small', 'size_medium', 'size_large', 'size_disable', 'default']
+ default: null
+ event_history_detail:
+ description:
+ - Enable/Disable detail event history buffer.
+ required: false
+ choices: ['size_small', 'size_medium', 'size_large', 'size_disable', 'default']
+ default: null
+ event_history_events:
+ description:
+ - Enable/Disable event history buffer.
+ required: false
+ choices: ['size_small', 'size_medium', 'size_large', 'size_disable', 'default']
+ default: null
+ event_history_periodic:
+ description:
+ - Enable/Disable periodic event history buffer.
+ required: false
+ choices: ['size_small', 'size_medium', 'size_large', 'size_disable', 'default']
+ fast_external_fallover:
+ description:
+ - Enable/Disable immediately reset the session if the link to a
+ directly connected BGP peer goes down. Only supported in the
+ global BGP context.
+ required: false
+ choices: ['true','false']
+ default: null
+ flush_routes:
+ description:
+ - Enable/Disable flush routes in RIB upon controlled restart.
+ On NX-OS, this property is only supported in the global
+ BGP context.
+ required: false
+ choices: ['true','false']
+ default: null
+ graceful_restart:
+ description:
+ - Enable/Disable graceful restart.
+ required: false
+ choices: ['true','false']
+ default: null
+ graceful_restart_helper:
+ description:
+ - Enable/Disable graceful restart helper mode.
+ required: false
+ choices: ['true','false']
+ default: null
+ graceful_restart_timers_restart:
+ description:
+ - Set maximum time for a restart sent to the BGP peer.
+ required: false
+ choices: ['true','false']
+ default: null
+ graceful_restart_timers_stalepath_time:
+ description:
+ - Set maximum time that BGP keeps the stale routes from the
+ restarting BGP peer.
+ choices: ['true','false']
+ default: null
+ isolate:
+ description:
+ - Enable/Disable isolate this router from BGP perspective.
+ required: false
+ choices: ['true','false']
+ default: null
+ local_as:
+ description:
+ - Local AS number to be used within a VRF instance.
+ required: false
+ default: null
+ log_neighbor_changes:
+ description:
+ - Enable/Disable message logging for neighbor up/down event.
+ required: false
+ choices: ['true','false']
+ default: null
+ maxas_limit:
+ description:
+ - Specify Maximum number of AS numbers allowed in the AS-path
+ attribute. Valid values are between 1 and 512.
+ required: false
+ default: null
+ neighbor_down_fib_accelerate:
+ description:
+ - Enable/Disable handle BGP neighbor down event, due to
+ various reasons.
+ required: false
+ choices: ['true','false']
+ default: null
+ reconnect_interval:
+ description:
+ - The BGP reconnection interval for dropped sessions.
+ Valid values are between 1 and 60.
+ required: false
+ default: null
+ router_id:
+ description:
+ - Router Identifier (ID) of the BGP router VRF instance.
+ required: false
+ default: null
+ shutdown:
+ description:
+ - Administratively shutdown the BGP protocol.
+ required: false
+ choices: ['true','false']
+ default: null
+ suppress_fib_pending:
+ description:
+ - Enable/Disable advertise only routes programmed in hardware
+ to peers.
+ required: false
+ choices: ['true','false']
+ default: null
+ timer_bestpath_limit:
+ description:
+ - Specify timeout for the first best path after a restart,
+ in seconds.
+ required: false
+ default: null
+ timer_bestpath_limit_always:
+ description:
+ - Enable/Disable update-delay-always option.
+ required: false
+ choices: ['true','false']
+ default: null
+ timer_bgp_hold:
+ description:
+ - Set BGP hold timer.
+ required: false
+ default: null
+ timer_bgp_keepalive:
+ description:
+ - Set BGP keepalive timer.
+ required: false
+ default: null
+ state:
+ description:
+ - Determines whether the config should be present or not
+ on the device.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+
+
+EXAMPLES = '''
+- name: Configure a simple ASN
+ nxos_bgp:
+ asn: 65535
+ vrf: test
+ router_id: 1.1.1.1
+ state: present
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"asn": "65535", "router_id": "1.1.1.1", "vrf": "test"}
+existing:
+ description: k/v pairs of existing BGP configuration
+ returned: verbose mode
+ type: dict
+ sample: {"asn": "65535", "bestpath_always_compare_med": false,
+ "bestpath_aspath_multipath_relax": false,
+ "bestpath_compare_neighborid": false,
+ "bestpath_compare_routerid": false,
+ "bestpath_cost_community_ignore": false,
+ "bestpath_med_confed": false,
+ "bestpath_med_missing_as_worst": false,
+ "bestpath_med_non_deterministic": false, "cluster_id": "",
+ "confederation_id": "", "confederation_peers": "",
+ "graceful_restart": true, "graceful_restart_helper": false,
+ "graceful_restart_timers_restart": "120",
+ "graceful_restart_timers_stalepath_time": "300", "local_as": "",
+ "log_neighbor_changes": false, "maxas_limit": "",
+ "neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
+ "router_id": "11.11.11.11", "suppress_fib_pending": false,
+ "timer_bestpath_limit": "", "timer_bgp_hold": "180",
+ "timer_bgp_keepalive": "60", "vrf": "test"}
+end_state:
+ description: k/v pairs of BGP configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"asn": "65535", "bestpath_always_compare_med": false,
+ "bestpath_aspath_multipath_relax": false,
+ "bestpath_compare_neighborid": false,
+ "bestpath_compare_routerid": false,
+ "bestpath_cost_community_ignore": false,
+ "bestpath_med_confed": false,
+ "bestpath_med_missing_as_worst": false,
+ "bestpath_med_non_deterministic": false, "cluster_id": "",
+ "confederation_id": "", "confederation_peers": "",
+ "graceful_restart": true, "graceful_restart_helper": false,
+ "graceful_restart_timers_restart": "120",
+ "graceful_restart_timers_stalepath_time": "300", "local_as": "",
+ "log_neighbor_changes": false, "maxas_limit": "",
+ "neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
+ "router_id": "1.1.1.1", "suppress_fib_pending": false,
+ "timer_bestpath_limit": "", "timer_bgp_hold": "180",
+ "timer_bgp_keepalive": "60", "vrf": "test"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+# COMMON CODE FOR MIGRATION
+import re
+
+import ansible.module_utils.nxos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.shell import ShellError
+
+
def to_list(val):
    """Coerce *val* into a list: lists/tuples are copied, None becomes [],
    anything else is wrapped as a one-element list."""
    if isinstance(val, (list, tuple)):
        return list(val)
    return [] if val is None else [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass with section-expansion helpers.

    Part of the pre-2.3 'common code for migration' shim duplicated into
    each migrated nxos module.
    """

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all of its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Return the item whose text and full ancestry match *path*,
        # or None when no such line exists.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of config objects back into raw config text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Return the section at *path* as a text block, or [] when absent.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Expand the object at *path* (raises ValueError when missing).
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)

    def add(self, lines, parents=None):
        """Add one or more lines of configuration, creating any missing
        parent sections along the way."""

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+WARNINGS = []
+BOOL_PARAMS = [
+ 'bestpath_always_compare_med',
+ 'bestpath_aspath_multipath_relax',
+ 'bestpath_compare_neighborid',
+ 'bestpath_compare_routerid',
+ 'bestpath_cost_community_ignore',
+ 'bestpath_med_confed',
+ 'bestpath_med_missing_as_worst',
+ 'bestpath_med_non_deterministic',
+ 'disable_policy_batching',
+ 'enforce_first_as',
+ 'fast_external_fallover',
+ 'flush_routes',
+ 'graceful_restart',
+ 'graceful_restart_helper',
+ 'isolate',
+ 'log_neighbor_changes',
+ 'neighbor_down_fib_accelerate',
+ 'shutdown',
+ 'suppress_fib_pending'
+]
+GLOBAL_PARAMS = [
+ 'disable_policy_batching',
+ 'disable_policy_batching_ipv4_prefix_list',
+ 'disable_policy_batching_ipv6_prefix_list',
+ 'enforce_first_as',
+ 'event_history_cli',
+ 'event_history_detail',
+ 'event_history_events',
+ 'event_history_periodic',
+ 'fast_external_fallover',
+ 'flush_routes',
+ 'isolate',
+ 'shutdown'
+]
+PARAM_TO_DEFAULT_KEYMAP = {
+ 'timer_bgp_keepalive': '60',
+ 'timer_bgp_hold': '180',
+ 'graceful_restart': True,
+ 'graceful_restart_timers_restart': '120',
+ 'graceful_restart_timers_stalepath_time': '300',
+ 'reconnect_interval': '60',
+ 'suppress_fib_pending': True,
+ 'fast_external_fallover': True,
+ 'enforce_first_as': True,
+ 'event_history_periodic': True,
+ 'event_history_cli': True,
+ 'event_history_events': True
+}
+PARAM_TO_COMMAND_KEYMAP = {
+ 'asn': 'router bgp',
+ 'bestpath_always_compare_med': 'bestpath always-compare-med',
+ 'bestpath_aspath_multipath_relax': 'bestpath as-path multipath-relax',
+ 'bestpath_compare_neighborid': 'bestpath compare-neighborid',
+ 'bestpath_compare_routerid': 'bestpath compare-routerid',
+ 'bestpath_cost_community_ignore': 'bestpath cost-community ignore',
+ 'bestpath_med_confed': 'bestpath med confed',
+ 'bestpath_med_missing_as_worst': 'bestpath med missing-as-worst',
+ 'bestpath_med_non_deterministic': 'bestpath med non-deterministic',
+ 'cluster_id': 'cluster-id',
+ 'confederation_id': 'confederation identifier',
+ 'confederation_peers': 'confederation peers',
+ 'disable_policy_batching': 'disable-policy-batching',
+ 'disable_policy_batching_ipv4_prefix_list': 'disable-policy-batching ipv4 prefix-list',
+ 'disable_policy_batching_ipv6_prefix_list': 'disable-policy-batching ipv6 prefix-list',
+ 'enforce_first_as': 'enforce-first-as',
+ 'event_history_cli': 'event-history cli',
+ 'event_history_detail': 'event-history detail',
+ 'event_history_events': 'event-history events',
+ 'event_history_periodic': 'event-history periodic',
+ 'fast_external_fallover': 'fast-external-fallover',
+ 'flush_routes': 'flush-routes',
+ 'graceful_restart': 'graceful-restart',
+ 'graceful_restart_helper': 'graceful-restart-helper',
+ 'graceful_restart_timers_restart': 'graceful-restart restart-time',
+ 'graceful_restart_timers_stalepath_time': 'graceful-restart stalepath-time',
+ 'isolate': 'isolate',
+ 'local_as': 'local-as',
+ 'log_neighbor_changes': 'log-neighbor-changes',
+ 'maxas_limit': 'maxas-limit',
+ 'neighbor_down_fib_accelerate': 'neighbor-down fib-accelerate',
+ 'reconnect_interval': 'reconnect-interval',
+ 'router_id': 'router-id',
+ 'shutdown': 'shutdown',
+ 'suppress_fib_pending': 'suppress-fib-pending',
+ 'timer_bestpath_limit': 'timers bestpath-limit',
+ 'timer_bgp_hold': 'timers bgp',
+ 'timer_bgp_keepalive': 'timers bgp',
+ 'vrf': 'vrf'
+}
+
+
+def invoke(name, *args, **kwargs):
+ func = globals().get(name)
+ if func:
+ return func(*args, **kwargs)
+
+
+def get_custom_value(config, arg):
+ if arg.startswith('event_history'):
+ REGEX_SIZE = re.compile(r'(?:{0} size\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
+ REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
+ value = False
+
+ if 'no {0}'.format(PARAM_TO_COMMAND_KEYMAP[arg]) in config:
+ pass
+ elif PARAM_TO_COMMAND_KEYMAP[arg] in config:
+ try:
+ value = REGEX_SIZE.search(config).group('value')
+ except AttributeError:
+ if REGEX.search(config):
+ value = True
+
+ elif arg == 'enforce_first_as' or arg == 'fast_external_fallover':
+ REGEX = re.compile(r'no\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
+ value = True
+ try:
+ if REGEX.search(config):
+ value = False
+ except TypeError:
+ value = True
+
+ elif arg == 'confederation_peers':
+ REGEX = re.compile(r'(?:confederation peers\s)(?P<value>.*)$', re.M)
+ value = ''
+ if 'confederation peers' in config:
+ value = REGEX.search(config).group('value').split()
+
+ elif arg == 'timer_bgp_keepalive':
+ REGEX = re.compile(r'(?:timers bgp\s)(?P<value>.*)$', re.M)
+ value = ''
+ if 'timers bgp' in config:
+ parsed = REGEX.search(config).group('value').split()
+ value = parsed[0]
+
+ elif arg == 'timer_bgp_hold':
+ REGEX = re.compile(r'(?:timers bgp\s)(?P<value>.*)$', re.M)
+ value = ''
+ if 'timers bgp' in config:
+ parsed = REGEX.search(config).group('value').split()
+ if len(parsed) == 2:
+ value = parsed[1]
+
+ return value
+
+
+def get_value(arg, config):
+ custom = [
+ 'event_history_cli',
+ 'event_history_events',
+ 'event_history_periodic',
+ 'event_history_detail',
+ 'confederation_peers',
+ 'timer_bgp_hold',
+ 'timer_bgp_keepalive',
+ 'enforce_first_as',
+ 'fast_external_fallover'
+ ]
+
+ if arg in custom:
+ value = get_custom_value(config, arg)
+ elif arg in BOOL_PARAMS:
+ REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
+ value = False
+ try:
+ if REGEX.search(config):
+ value = True
+ except TypeError:
+ value = False
+ else:
+ REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
+ value = ''
+ if PARAM_TO_COMMAND_KEYMAP[arg] in config:
+ value = REGEX.search(config).group('value')
+ return value
+
+
+def get_existing(module, args):
+ existing = {}
+ netcfg = get_config(module)
+
+ try:
+ asn_regex = '.*router\sbgp\s(?P<existing_asn>\d+).*'
+ match_asn = re.match(asn_regex, str(netcfg), re.DOTALL)
+ existing_asn_group = match_asn.groupdict()
+ existing_asn = existing_asn_group['existing_asn']
+ except AttributeError:
+ existing_asn = ''
+
+ if existing_asn:
+ bgp_parent = 'router bgp {0}'.format(existing_asn)
+ if module.params['vrf'] != 'default':
+ parents = [bgp_parent, 'vrf {0}'.format(module.params['vrf'])]
+ else:
+ parents = [bgp_parent]
+
+ config = netcfg.get_section(parents)
+ if config:
+ for arg in args:
+ if arg != 'asn':
+ if module.params['vrf'] != 'default':
+ if arg not in GLOBAL_PARAMS:
+ existing[arg] = get_value(arg, config)
+ else:
+ existing[arg] = get_value(arg, config)
+
+ existing['asn'] = existing_asn
+ if module.params['vrf'] == 'default':
+ existing['vrf'] = 'default'
+ else:
+ if (module.params['state'] == 'present' and
+ module.params['vrf'] != 'default'):
+ msg = ("VRF {0} doesn't exist. ".format(module.params['vrf']))
+ WARNINGS.append(msg)
+ else:
+ if (module.params['state'] == 'present' and
+ module.params['vrf'] != 'default'):
+ msg = ("VRF {0} doesn't exist. ".format(module.params['vrf']))
+ WARNINGS.append(msg)
+
+ return existing
+
+
+def apply_key_map(key_map, table):
+ new_dict = {}
+ for key, value in table.items():
+ new_key = key_map.get(key)
+ if new_key:
+ value = table.get(key)
+ if value:
+ new_dict[new_key] = value
+ else:
+ new_dict[new_key] = value
+ return new_dict
+
+
+def state_present(module, existing, proposed, candidate):
+ commands = list()
+ proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
+ existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
+
+ for key, value in proposed_commands.iteritems():
+ if value is True:
+ commands.append(key)
+ elif value is False:
+ commands.append('no {0}'.format(key))
+ elif value == 'default':
+ if key in PARAM_TO_DEFAULT_KEYMAP:
+ commands.append('{0} {1}'.format(key, PARAM_TO_DEFAULT_KEYMAP[key]))
+ elif existing_commands.get(key):
+ existing_value = existing_commands.get(key)
+ if key == 'confederation peers':
+ commands.append('no {0} {1}'.format(key, ' '.join(existing_value)))
+ else:
+ commands.append('no {0} {1}'.format(key, existing_value))
+ else:
+ if key == 'confederation peers':
+ existing_confederation_peers = existing.get('confederation_peers')
+
+ if existing_confederation_peers:
+ if not isinstance(existing_confederation_peers, list):
+ existing_confederation_peers = [existing_confederation_peers]
+ else:
+ existing_confederation_peers = []
+
+ values = value.split()
+ for each_value in values:
+ if each_value not in existing_confederation_peers:
+ existing_confederation_peers.append(each_value)
+ peer_string = ' '.join(existing_confederation_peers)
+ commands.append('{0} {1}'.format(key, peer_string))
+ elif key.startswith('timers bgp'):
+ command = 'timers bgp {0} {1}'.format(
+ proposed['timer_bgp_keepalive'],
+ proposed['timer_bgp_hold'])
+ if command not in commands:
+ commands.append(command)
+ else:
+ if value.startswith('size'):
+ value = value.replace('_', ' ')
+ command = '{0} {1}'.format(key, value)
+ commands.append(command)
+
+ if commands:
+ commands = fix_commands(commands)
+ parents = ['router bgp {0}'.format(module.params['asn'])]
+ if module.params['vrf'] != 'default':
+ parents.append('vrf {0}'.format(module.params['vrf']))
+ candidate.add(commands, parents=parents)
+ else:
+ if len(proposed.keys()) == 0:
+ if module.params['vrf'] != 'default':
+ commands.append('vrf {0}'.format(module.params['vrf']))
+ parents = ['router bgp {0}'.format(module.params['asn'])]
+ else:
+ commands.append('router bgp {0}'.format(module.params['asn']))
+ parents = []
+ candidate.add(commands, parents=parents)
+
+
+def state_absent(module, existing, proposed, candidate):
+ commands = []
+ parents = []
+ if module.params['vrf'] == 'default':
+ commands.append('no router bgp {0}'.format(module.params['asn']))
+ else:
+ if existing.get('vrf') == module.params['vrf']:
+ commands.append('no vrf {0}'.format(module.params['vrf']))
+ parents = ['router bgp {0}'.format(module.params['asn'])]
+
+ candidate.add(commands, parents=parents)
+
+
+def fix_commands(commands):
+ local_as_command = ''
+ confederation_id_command = ''
+ confederation_peers_command = ''
+
+ for command in commands:
+ if 'local-as' in command:
+ local_as_command = command
+ elif 'confederation identifier' in command:
+ confederation_id_command = command
+ elif 'confederation peers' in command:
+ confederation_peers_command = command
+
+ if local_as_command and confederation_id_command:
+ commands.pop(commands.index(local_as_command))
+ commands.pop(commands.index(confederation_id_command))
+ commands.append(local_as_command)
+ commands.append(confederation_id_command)
+
+ elif confederation_peers_command and confederation_id_command:
+ commands.pop(commands.index(confederation_peers_command))
+ commands.pop(commands.index(confederation_id_command))
+ commands.append(confederation_id_command)
+ commands.append(confederation_peers_command)
+
+ return commands
+
+
+def main():
+ argument_spec = dict(
+ asn=dict(required=True, type='str'),
+ vrf=dict(required=False, type='str', default='default'),
+ bestpath_always_compare_med=dict(required=False, type='bool'),
+ bestpath_aspath_multipath_relax=dict(required=False, type='bool'),
+ bestpath_compare_neighborid=dict(required=False, type='bool'),
+ bestpath_compare_routerid=dict(required=False, type='bool'),
+ bestpath_cost_community_ignore=dict(required=False, type='bool'),
+ bestpath_med_confed=dict(required=False, type='bool'),
+ bestpath_med_missing_as_worst=dict(required=False, type='bool'),
+ bestpath_med_non_deterministic=dict(required=False, type='bool'),
+ cluster_id=dict(required=False, type='str'),
+ confederation_id=dict(required=False, type='str'),
+ confederation_peers=dict(required=False, type='str'),
+ disable_policy_batching=dict(required=False, type='bool'),
+ disable_policy_batching_ipv4_prefix_list=dict(required=False, type='str'),
+ disable_policy_batching_ipv6_prefix_list=dict(required=False, type='str'),
+ enforce_first_as=dict(required=False, type='bool'),
+ event_history_cli=dict(required=False, choices=['true', 'false', 'default', 'size_small', 'size_medium', 'size_large', 'size_disable']),
+ event_history_detail=dict(required=False, choices=['true', 'false', 'default', 'size_small', 'size_medium', 'size_large', 'size_disable']),
+ event_history_events=dict(required=False, choices=['true', 'false', 'default', 'size_small', 'size_medium', 'size_large', 'size_disable']),
+ event_history_periodic=dict(required=False, choices=['true', 'false', 'default', 'size_small', 'size_medium', 'size_large', 'size_disable']),
+ fast_external_fallover=dict(required=False, type='bool'),
+ flush_routes=dict(required=False, type='bool'),
+ graceful_restart=dict(required=False, type='bool'),
+ graceful_restart_helper=dict(required=False, type='bool'),
+ graceful_restart_timers_restart=dict(required=False, type='str'),
+ graceful_restart_timers_stalepath_time=dict(required=False, type='str'),
+ isolate=dict(required=False, type='bool'),
+ local_as=dict(required=False, type='str'),
+ log_neighbor_changes=dict(required=False, type='bool'),
+ maxas_limit=dict(required=False, type='str'),
+ neighbor_down_fib_accelerate=dict(required=False, type='bool'),
+ reconnect_interval=dict(required=False, type='str'),
+ router_id=dict(required=False, type='str'),
+ shutdown=dict(required=False, type='bool'),
+ suppress_fib_pending=dict(required=False, type='bool'),
+ timer_bestpath_limit=dict(required=False, type='str'),
+ timer_bgp_hold=dict(required=False, type='str'),
+ timer_bgp_keepalive=dict(required=False, type='str'),
+ state=dict(choices=['present', 'absent'], default='present',
+ required=False),
+ include_defaults=dict(default=True),
+ config=dict(),
+ save=dict(type='bool', default=False)
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ required_together=[['timer_bgp_hold',
+ 'timer_bgp_keepalive']],
+ supports_check_mode=True)
+
+ state = module.params['state']
+ args = [
+ "asn",
+ "bestpath_always_compare_med",
+ "bestpath_aspath_multipath_relax",
+ "bestpath_compare_neighborid",
+ "bestpath_compare_routerid",
+ "bestpath_cost_community_ignore",
+ "bestpath_med_confed",
+ "bestpath_med_missing_as_worst",
+ "bestpath_med_non_deterministic",
+ "cluster_id",
+ "confederation_id",
+ "confederation_peers",
+ "disable_policy_batching",
+ "disable_policy_batching_ipv4_prefix_list",
+ "disable_policy_batching_ipv6_prefix_list",
+ "enforce_first_as",
+ "event_history_cli",
+ "event_history_detail",
+ "event_history_events",
+ "event_history_periodic",
+ "fast_external_fallover",
+ "flush_routes",
+ "graceful_restart",
+ "graceful_restart_helper",
+ "graceful_restart_timers_restart",
+ "graceful_restart_timers_stalepath_time",
+ "isolate",
+ "local_as",
+ "log_neighbor_changes",
+ "maxas_limit",
+ "neighbor_down_fib_accelerate",
+ "reconnect_interval",
+ "router_id",
+ "shutdown",
+ "suppress_fib_pending",
+ "timer_bestpath_limit",
+ "timer_bgp_hold",
+ "timer_bgp_keepalive",
+ "vrf"
+ ]
+
+ if module.params['vrf'] != 'default':
+ for param, inserted_value in module.params.iteritems():
+ if param in GLOBAL_PARAMS and inserted_value:
+ module.fail_json(msg='Global params can be modified only'
+ ' under "default" VRF.',
+ vrf=module.params['vrf'],
+ global_param=param)
+
+ existing = invoke('get_existing', module, args)
+
+ if existing.get('asn'):
+ if (existing.get('asn') != module.params['asn'] and
+ state == 'present'):
+ module.fail_json(msg='Another BGP ASN already exists.',
+ proposed_asn=module.params['asn'],
+ existing_asn=existing.get('asn'))
+
+ end_state = existing
+ proposed_args = dict((k, v) for k, v in module.params.iteritems()
+ if v is not None and k in args)
+ proposed = {}
+ for key, value in proposed_args.iteritems():
+ if key != 'asn' and key != 'vrf':
+ if str(value).lower() == 'default':
+ value = PARAM_TO_DEFAULT_KEYMAP.get(key)
+ if value is None:
+ value = 'default'
+ if existing.get(key) or (not existing.get(key) and value):
+ proposed[key] = value
+
+ result = {}
+ if (state == 'present' or (state == 'absent' and
+ existing.get('asn') == module.params['asn'])):
+ candidate = CustomNetworkConfig(indent=3)
+ invoke('state_%s' % state, module, existing, proposed, candidate)
+
+ try:
+ response = load_config(module, candidate)
+ result.update(response)
+ except ShellError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc))
+ else:
+ result['updates'] = []
+
+ result['connected'] = module.connected
+ if module._verbosity > 0:
+ end_state = invoke('get_existing', module, args)
+ result['end_state'] = end_state
+ result['existing'] = existing
+ result['proposed'] = proposed_args
+
+ if WARNINGS:
+ result['warnings'] = WARNINGS
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_bgp_af.py b/network/nxos/nxos_bgp_af.py
new file mode 100644
index 00000000000..3b804d51a3c
--- /dev/null
+++ b/network/nxos/nxos_bgp_af.py
@@ -0,0 +1,1110 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_bgp_af
+version_added: "2.2"
+short_description: Manages BGP Address-family configuration.
+description:
+ - Manages BGP Address-family configurations on NX-OS switches.
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - C(state=absent) removes the whole BGP ASN configuration
+ - Default, where supported, restores params default value.
+options:
+ asn:
+ description:
+ - BGP autonomous system number. Valid values are String,
+ Integer in ASPLAIN or ASDOT notation.
+ required: true
+ vrf:
+ description:
+ - Name of the VRF. The name 'default' is a valid VRF representing
+ the global bgp.
+ required: true
+ afi:
+ description:
+ - Address Family Identifier.
+ required: true
+ choices: ['ipv4','ipv6', 'vpnv4', 'vpnv6', 'l2vpn']
+ safi:
+ description:
+ - Sub Address Family Identifier.
+ required: true
+ choices: ['unicast','multicast', 'evpn']
+ additional_paths_install:
+ description:
+ - Install a backup path into the forwarding table and provide
+ prefix independent convergence (PIC) in case of a PE-CE link
+ failure.
+ required: false
+ choices: ['true','false']
+ default: null
+ additional_paths_receive:
+ description:
+ - Enables the receive capability of additional paths for all of
+ the neighbors under this address family for which the capability
+ has not been disabled.
+ required: false
+ choices: ['true','false']
+ default: null
+ additional_paths_selection:
+ description:
+ - Configures the capability of selecting additional paths for
+ a prefix. Valid values are a string defining the name of
+ the route-map.
+ required: false
+ default: null
+ additional_paths_send:
+ description:
+ - Enables the send capability of additional paths for all of
+ the neighbors under this address family for which the capability
+ has not been disabled.
+ required: false
+ choices: ['true','false']
+ default: null
+ advertise_l2vpn_evpn:
+ description:
+ - Advertise evpn routes.
+ required: false
+ choices: ['true','false']
+ default: null
+ client_to_client:
+ description:
+ - Configure client-to-client route reflection.
+ required: false
+ choices: ['true','false']
+ default: null
+ dampen_igp_metric:
+ description:
+ - Specify dampen value for IGP metric-related changes, in seconds.
+ Valid values are integer and keyword 'default'.
+ required: false
+ default: null
+ dampening_state:
+ description:
+ - Enable/disable route-flap dampening.
+ required: false
+ choices: ['true','false']
+ default: null
+ dampening_half_time:
+ description:
+ - Specify decay half-life in minutes for route-flap dampening.
+ Valid values are integer and keyword 'default'.
+ required: false
+ default: null
+ dampening_max_suppress_time:
+ description:
+ - Specify max suppress time for route-flap dampening stable route.
+ Valid values are integer and keyword 'default'.
+ required: false
+ default: null
+ dampening_reuse_time:
+ description:
+ - Specify route reuse time for route-flap dampening.
+ Valid values are integer and keyword 'default'.
+ required: false
+ dampening_routemap:
+ description:
+ - Specify route-map for route-flap dampening. Valid values are a
+ string defining the name of the route-map.
+ required: false
+ default: null
+ dampening_suppress_time:
+ description:
+ - Specify route suppress time for route-flap dampening.
+ Valid values are integer and keyword 'default'.
+ required: false
+ default: null
+ default_information_originate:
+ description:
+ - Default information originate.
+ required: false
+ choices: ['true','false']
+ default: null
+ default_metric:
+ description:
+ - Sets default metrics for routes redistributed into BGP.
+ Valid values are Integer or keyword 'default'
+ required: false
+ default: null
+ distance_ebgp:
+ description:
+ - Sets the administrative distance for eBGP routes.
+ Valid values are Integer or keyword 'default'.
+ required: false
+ default: null
+ distance_ibgp:
+ description:
+ - Sets the administrative distance for iBGP routes.
+ Valid values are Integer or keyword 'default'.
+ required: false
+ default: null
+ distance_local:
+ description:
+ - Sets the administrative distance for local BGP routes.
+ Valid values are Integer or keyword 'default'.
+ required: false
+ default: null
+ inject_map:
+ description:
+ - An array of route-map names which will specify prefixes to
+ inject. Each array entry must first specify the inject-map name,
+ secondly an exist-map name, and optionally the copy-attributes
+ keyword which indicates that attributes should be copied from
+ the aggregate. For example [['lax_inject_map', 'lax_exist_map'],
+ ['nyc_inject_map', 'nyc_exist_map', 'copy-attributes'],
+ ['fsd_inject_map', 'fsd_exist_map']].
+ required: false
+ default: null
+ maximum_paths:
+ description:
+ - Configures the maximum number of equal-cost paths for
+ load sharing. Valid value is an integer in the range 1-64.
+ default: null
+ maximum_paths_ibgp:
+ description:
+ - Configures the maximum number of ibgp equal-cost paths for
+ load sharing. Valid value is an integer in the range 1-64.
+ required: false
+ default: null
+ networks:
+ description:
+ - Networks to configure. Valid value is a list of network
+ prefixes to advertise. The list must be in the form of an array.
+ Each entry in the array must include a prefix address and an
+ optional route-map. For example [['10.0.0.0/16', 'routemap_LA'],
+ ['192.168.1.1', 'Chicago'], ['192.168.2.0/24],
+ ['192.168.3.0/24', 'routemap_NYC']].
+ required: false
+ default: null
+ next_hop_route_map:
+ description:
+ - Configure a route-map for valid nexthops. Valid values are a
+ string defining the name of the route-map.
+ required: false
+ default: null
+ redistribute:
+ description:
+ - A list of redistribute directives. Multiple redistribute entries
+ are allowed. The list must be in the form of a nested array.
+ the first entry of each array defines the source-protocol to
+ redistribute from; the second entry defines a route-map name.
+ A route-map is highly advised but may be optional on some
+ platforms, in which case it may be omitted from the array list.
+ For example [['direct', 'rm_direct'], ['lisp', 'rm_lisp']].
+ required: false
+ default: null
+ suppress_inactive:
+ description:
+ - Advertises only active routes to peers.
+ required: false
+ choices: ['true','false']
+ default: null
+ table_map:
+ description:
+ - Apply table-map to filter routes downloaded into URIB.
+ Valid values are a string.
+ required: false
+ default: null
+ table_map_filter:
+ description:
+ - Filters routes rejected by the route-map and does not download
+ them to the RIB.
+ required: false
+ choices: ['true','false']
+ default: null
+ state:
+ description:
+ - Determines whether the config should be present or not
+ on the device.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+EXAMPLES = '''
+# configure a simple address-family
+- nxos_bgp_af:
+ asn: 65535
+ vrf: TESTING
+ afi: ipv4
+ safi: unicast
+ advertise_l2vpn_evpn: true
+ state: present
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"advertise_l2vpn_evpn": true, "afi": "ipv4",
+ "asn": "65535", "safi": "unicast", "vrf": "TESTING"}
+existing:
+ description: k/v pairs of existing BGP AF configuration
+ returned: verbose mode
+ type: dict
+ sample: {}
+end_state:
+ description: k/v pairs of BGP AF configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"additional_paths_install": false,
+ "additional_paths_receive": false,
+ "additional_paths_selection": "",
+ "additional_paths_send": false,
+ "advertise_l2vpn_evpn": true, "afi": "ipv4",
+ "asn": "65535", "client_to_client": true,
+ "dampen_igp_metric": "600", "dampening_half_time": "",
+ "dampening_max_suppress_time": "", "dampening_reuse_time": "",
+ "dampening_routemap": "", "dampening_state": false,
+ "dampening_suppress_time": "",
+ "default_information_originate": false, "default_metric": "",
+ "distance_ebgp": "20", "distance_ibgp": "200",
+ "distance_local": "220", "inject_map": [], "maximum_paths": "1",
+ "maximum_paths_ibgp": "1", "networks": [],
+ "next_hop_route_map": "", "redistribute": [], "safi": "unicast",
+ "suppress_inactive": false, "table_map": "",
+ "table_map_filter": false, "vrf": "TESTING"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["router bgp 65535", "vrf TESTING",
+ "address-family ipv4 unicast", "advertise l2vpn evpn"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+# COMMON CODE FOR MIGRATION
+import re
+
+import ansible.module_utils.nxos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.shell import ShellError
+
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
# Warning strings accumulated during the run; surfaced in the result.
WARNINGS = []
# Parameters that map to simple flag commands (command present => True).
BOOL_PARAMS = [
    'additional_paths_install',
    'additional_paths_receive',
    'additional_paths_send',
    'advertise_l2vpn_evpn',
    'client_to_client',
    'dampening_state',
    'default_information_originate',
    'suppress_inactive',
]
# Device-side default values substituted when a param is set to 'default'.
PARAM_TO_DEFAULT_KEYMAP = {
    'maximum_paths': '1',
    'maximum_paths_ibgp': '1',
    'client_to_client': True,
    'distance_ebgp': '20',
    'distance_ibgp': '200',
    'distance_local': '220',
    'dampen_igp_metric': '600'
}
# Maps each module parameter to the NX-OS command keyword that sets it.
PARAM_TO_COMMAND_KEYMAP = {
    'asn': 'router bgp',
    'afi': 'address-family',
    'safi': 'address-family',
    'additional_paths_install': 'additional-paths install backup',
    'additional_paths_receive': 'additional-paths receive',
    'additional_paths_selection': 'additional-paths selection route-map',
    'additional_paths_send': 'additional-paths send',
    'advertise_l2vpn_evpn': 'advertise l2vpn evpn',
    'client_to_client': 'client-to-client reflection',
    'dampen_igp_metric': 'dampen-igp-metric',
    'dampening_state': 'dampening',
    'dampening_half_time': 'dampening',
    'dampening_max_suppress_time': 'dampening',
    'dampening_reuse_time': 'dampening',
    'dampening_routemap': 'dampening route-map',
    'dampening_suppress_time': 'dampening',
    'default_information_originate': 'default-information originate',
    'default_metric': 'default-metric',
    'distance_ebgp': 'distance',
    'distance_ibgp': 'distance',
    'distance_local': 'distance',
    'inject_map': 'inject-map',
    'maximum_paths': 'maximum-paths',
    'maximum_paths_ibgp': 'maximum-paths ibgp',
    'networks': 'network',
    'redistribute': 'redistribute',
    'next_hop_route_map': 'nexthop route-map',
    'suppress_inactive': 'suppress-inactive',
    'table_map': 'table-map',
    'table_map_filter': 'table-map-filter',
    'vrf': 'vrf'
}
# Dampening timer params; declared required_together in the argument spec.
DAMPENING_PARAMS = [
    'dampening_half_time',
    'dampening_suppress_time',
    'dampening_reuse_time',
    'dampening_max_suppress_time'
]
+
+
def invoke(name, *args, **kwargs):
    """Call the module-level function *name* if defined, else return None."""
    target = globals().get(name)
    if not target:
        return None
    return target(*args, **kwargs)
+
+
def get_custom_list_value(config, arg, module):
    """Parse list-valued BGP AF settings out of a config section string.

    Supported args: 'inject_map', 'networks' and 'redistribute'.  Each
    matching config line is reduced to a list of its significant tokens
    and the collected lists are returned.
    """
    value_list = []
    splitted_config = config.splitlines()
    if arg == 'inject_map':
        # Named groups restored: extraction had dropped the
        # '<inject_map>'/'<exist_map>' names, leaving invalid '(?P\S+)'
        # patterns that re.match() cannot compile and whose groupdict()
        # lookups below could never succeed.
        REGEX_INJECT = (r'.*inject-map\s(?P<inject_map>\S+)'
                        r'\sexist-map\s(?P<exist_map>\S+)-*')

        for line in splitted_config:
            value = []
            inject_group = {}
            try:
                match_inject = re.match(REGEX_INJECT, line, re.DOTALL)
                inject_group = match_inject.groupdict()
                value.append(inject_group['inject_map'])
                value.append(inject_group['exist_map'])
            except AttributeError:
                # no match on this line
                value = []

            if value:
                copy_attributes = False
                inject_map_command = ('inject-map {0} exist-map {1} '
                                      'copy-attributes'.format(
                                          inject_group['inject_map'],
                                          inject_group['exist_map']))

                REGEX = re.compile(r'\s+{0}\s*$'.format(
                    inject_map_command), re.M)
                try:
                    if REGEX.search(config):
                        copy_attributes = True
                except TypeError:
                    copy_attributes = False

                if copy_attributes:
                    value.append('copy_attributes')
                value_list.append(value)

    elif arg == 'networks':
        REGEX_NETWORK = re.compile(r'(?:network\s)(?P<value>.*)$')

        for line in splitted_config:
            value = []
            if 'network' in line:
                value = REGEX_NETWORK.search(line).group('value').split()

            if value:
                # drop the literal 'route-map' keyword, keeping prefix + map
                if len(value) == 3:
                    value.pop(1)
                value_list.append(value)

    elif arg == 'redistribute':
        RED_REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(
            PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        for line in splitted_config:
            value = []
            if 'redistribute' in line:
                value = RED_REGEX.search(line).group('value').split()
                if value:
                    if len(value) == 3:
                        # drop the 'route-map' keyword
                        value.pop(1)
                    elif len(value) == 4:
                        # two-word protocol (e.g. 'ospf 1') + route-map name
                        value = ['{0} {1}'.format(
                            value[0], value[1]), value[3]]
                    value_list.append(value)
    return value_list
+
+
def get_custom_string_value(config, arg, module):
    """Parse scalar BGP AF settings (distance, dampening, table-map).

    Returns the parsed string value, or a boolean for table_map_filter.
    """
    value = ''
    if arg.startswith('distance'):
        # Named groups restored: extraction had dropped the
        # '<d_ebgp>'/'<d_ibgp>'/'<d_local>' names (the dict lookups
        # below fix which names the pattern must define).
        REGEX_DISTANCE = (r'.*distance\s(?P<d_ebgp>\w+)\s(?P<d_ibgp>\w+)'
                          r'\s(?P<d_local>\w+)')
        try:
            match_distance = re.match(REGEX_DISTANCE, config, re.DOTALL)
            distance_group = match_distance.groupdict()
        except AttributeError:
            distance_group = {}

        if distance_group:
            if arg == 'distance_ebgp':
                value = distance_group['d_ebgp']
            elif arg == 'distance_ibgp':
                value = distance_group['d_ibgp']
            elif arg == 'distance_local':
                value = distance_group['d_local']

    elif arg.startswith('dampening'):
        REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(
            PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        if arg == 'dampen_igp_metric' or arg == 'dampening_routemap':
            value = ''
            if PARAM_TO_COMMAND_KEYMAP[arg] in config:
                value = REGEX.search(config).group('value')
        else:
            # Named timer groups restored (half/reuse/suppress/max_suppress).
            REGEX_DAMPENING = (r'.*dampening\s(?P<half>\w+)\s(?P<reuse>\w+)'
                               r'\s(?P<suppress>\w+)\s(?P<max_suppress>\w+)')
            try:
                match_dampening = re.match(REGEX_DAMPENING, config, re.DOTALL)
                dampening_group = match_dampening.groupdict()
            except AttributeError:
                dampening_group = {}

            if dampening_group:
                if arg == 'dampening_half_time':
                    value = dampening_group['half']
                elif arg == 'dampening_reuse_time':
                    value = dampening_group['reuse']
                elif arg == 'dampening_suppress_time':
                    value = dampening_group['suppress']
                elif arg == 'dampening_max_suppress_time':
                    value = dampening_group['max_suppress']

    elif arg == 'table_map_filter':
        TMF_REGEX = re.compile(r'\s+table-map.*filter$', re.M)
        value = False
        try:
            if TMF_REGEX.search(config):
                value = True
        except TypeError:
            value = False
    elif arg == 'table_map':
        # '<value>' group name restored here as well.
        TM_REGEX = re.compile(r'(?:table-map\s)(?P<value>\S+)(\sfilter)?$', re.M)
        value = ''
        if PARAM_TO_COMMAND_KEYMAP[arg] in config:
            value = TM_REGEX.search(config).group('value')
    return value
+
+
def get_value(arg, config, module):
    """Extract the current value of *arg* from a config section string.

    Booleans are detected by presence of the mapped command; list- and
    string-valued custom params are delegated to their dedicated
    parsers; anything else is the text following the command keyword.
    """
    custom = [
        'inject_map',
        'networks',
        'redistribute'
    ]

    if arg in BOOL_PARAMS:
        REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = False
        try:
            if REGEX.search(config):
                value = True
        except TypeError:
            # config was not a string
            value = False

    elif arg in custom:
        value = get_custom_list_value(config, arg, module)

    elif (arg.startswith('distance') or arg.startswith('dampening') or
            arg.startswith('table_map')):
        value = get_custom_string_value(config, arg, module)

    else:
        # '<value>' group name restored; extraction had stripped it,
        # leaving an invalid '(?P.*)' pattern.
        REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(
            PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = ''
        if PARAM_TO_COMMAND_KEYMAP[arg] in config:
            value = REGEX.search(config).group('value')
    return value
+
+
def get_existing(module, args):
    """Build a dict of the device's current values for every arg.

    Locates the 'router bgp <asn>' process (plus optional vrf and the
    address-family stanza) in the running config and parses each
    requested argument out of that section.
    """
    existing = {}
    netcfg = get_config(module)

    try:
        # '<existing_asn>' group name restored (dropped during extraction).
        asn_regex = r'.*router\sbgp\s(?P<existing_asn>\d+).*'
        match_asn = re.match(asn_regex, str(netcfg), re.DOTALL)
        existing_asn_group = match_asn.groupdict()
        existing_asn = existing_asn_group['existing_asn']
    except AttributeError:
        # no BGP process configured
        existing_asn = ''

    if existing_asn:
        parents = ["router bgp {0}".format(existing_asn)]
        if module.params['vrf'] != 'default':
            parents.append('vrf {0}'.format(module.params['vrf']))

        parents.append('address-family {0} {1}'.format(module.params['afi'],
                                                       module.params['safi']))
        config = netcfg.get_section(parents)

        if config:
            for arg in args:
                if arg not in ['asn', 'afi', 'safi', 'vrf']:
                    existing[arg] = get_value(arg, config, module)

            existing['asn'] = existing_asn
            existing['afi'] = module.params['afi']
            existing['safi'] = module.params['safi']
            existing['vrf'] = module.params['vrf']
    else:
        WARNINGS.append("The BGP process {0} didn't exist but the task"
                        " just created it.".format(module.params['asn']))

    return existing
+
+
def apply_key_map(key_map, table):
    """Return *table* re-keyed through *key_map*; unmapped keys are dropped.

    Note: the original if/else branches both stored the same value, so
    this collapses them into a single assignment — behavior unchanged.
    """
    renamed = {}
    for old_key, value in table.items():
        mapped = key_map.get(old_key)
        if mapped:
            renamed[mapped] = value
    return renamed
+
+
def fix_proposed(module, proposed, existing):
    """Fold dampening/distance params into single composite commands.

    Returns (fixed_proposed, commands): fixed_proposed holds params that
    still need per-key handling; commands holds pre-built composite
    'dampening ...' / 'distance ...' commands (each appended once).
    """
    commands = list()
    command = ''
    fixed_proposed = {}
    # .items() instead of the Python-2-only .iteritems() (works on both).
    for key, value in proposed.items():
        if key in DAMPENING_PARAMS:
            if value != 'default':
                command = 'dampening {0} {1} {2} {3}'.format(
                    proposed.get('dampening_half_time'),
                    proposed.get('dampening_reuse_time'),
                    proposed.get('dampening_suppress_time'),
                    proposed.get('dampening_max_suppress_time'))
            else:
                if existing.get(key):
                    command = ('no dampening {0} {1} {2} {3}'.format(
                        existing['dampening_half_time'],
                        existing['dampening_reuse_time'],
                        existing['dampening_suppress_time'],
                        existing['dampening_max_suppress_time']))
                if 'default' in command:
                    # incomplete existing timer set; nothing to remove
                    command = ''
        elif key.startswith('distance'):
            command = 'distance {0} {1} {2}'.format(
                proposed.get('distance_ebgp'),
                proposed.get('distance_ibgp'),
                proposed.get('distance_local'))
        else:
            fixed_proposed[key] = value

        if command:
            if command not in commands:
                commands.append(command)

    return fixed_proposed, commands
+
+
def default_existing(existing_value, key, value):
    """Build the 'no ...' commands that reset *key* to its default state."""
    if key == 'network':
        commands = []
        for entry in existing_value:
            if len(entry) == 2:
                commands.append('no network {0} route-map {1}'.format(
                    entry[0], entry[1]))
            elif len(entry) == 1:
                commands.append('no network {0}'.format(entry[0]))
        return commands

    if key == 'inject-map':
        commands = []
        for entry in existing_value:
            if len(entry) == 2:
                commands.append('no inject-map {0} exist-map {1}'.format(
                    entry[0], entry[1]))
            elif len(entry) == 3:
                commands.append('no inject-map {0} exist-map {1} '
                                'copy-attributes'.format(entry[0], entry[1]))
        return commands

    return ['no {0} {1}'.format(key, existing_value)]
+
+
def get_network_command(existing, key, value):
    """Build 'network ...' commands for prefixes not already configured."""
    commands = []
    configured = existing.get('networks', [])
    for entry in value:
        if not isinstance(entry, list):
            entry = [entry]
        if entry not in configured:
            if len(entry) == 1:
                command = '{0} {1}'.format(key, entry[0])
            elif len(entry) == 2:
                command = '{0} {1} route-map {2}'.format(
                    key, entry[0], entry[1])
            commands.append(command)
    return commands
+
+
def get_inject_map_command(existing, key, value):
    """Build 'inject-map ...' commands for maps not already configured."""
    commands = []
    configured = existing.get('inject_map', [])
    for entry in value:
        if not isinstance(entry, list):
            entry = [entry]
        if entry not in configured:
            if len(entry) == 2:
                command = 'inject-map {0} exist-map {1}'.format(
                    entry[0], entry[1])
            elif len(entry) == 3:
                command = ('inject-map {0} exist-map {1} '
                           'copy-attributes'.format(entry[0], entry[1]))
            commands.append(command)
    return commands
+
+
def get_redistribute_command(existing, key, value):
    """Build redistribute commands; rules set to 'default' emit removals."""
    commands = []
    for rule in value:
        if rule[1] == 'default':
            # remove any existing rule for this protocol
            for current in existing.get('redistribute', []):
                if rule[0] in current:
                    commands.append('no {0} {1} route-map {2}'.format(
                        key, current[0], current[1]))
        else:
            commands.append('{0} {1} route-map {2}'.format(
                key, rule[0], rule[1]))
    return commands
+
+
def get_table_map_command(module, existing, key, value):
    """Build table-map set/remove commands from the module params."""
    commands = []
    if key != 'table-map':
        return commands
    if value != 'default':
        command = '{0} {1}'.format(key, module.params['table_map'])
        filter_param = module.params['table_map_filter']
        if filter_param is not None and filter_param != 'default':
            command += ' filter'
        commands.append(command)
    elif existing.get('table_map'):
        commands.append('no {0} {1}'.format(key, existing.get('table_map')))
    return commands
+
+
def get_default_table_map_filter(existing):
    """Re-issue plain 'table-map X' to drop only the filter option."""
    if not existing.get('table_map_filter'):
        return []
    table_map = existing.get('table_map')
    if not table_map:
        return []
    return ['table-map {0}'.format(table_map)]
+
+
def state_present(module, existing, proposed, candidate):
    """Translate proposed-vs-existing differences into config commands.

    Builds the command list for the address-family stanza and loads it
    into *candidate* under the proper parent hierarchy.
    """
    fixed_proposed, commands = fix_proposed(module, proposed, existing)
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, fixed_proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
    # .items() instead of the Python-2-only .iteritems() (works on both).
    for key, value in proposed_commands.items():
        if key == 'address-family':
            addr_family_command = "address-family {0} {1}".format(
                module.params['afi'], module.params['safi'])
            if addr_family_command not in commands:
                commands.append(addr_family_command)

        elif key.startswith('table-map'):
            table_map_commands = get_table_map_command(module, existing, key, value)
            if table_map_commands:
                commands.extend(table_map_commands)

        elif value is True:
            commands.append(key)

        elif value is False:
            commands.append('no {0}'.format(key))

        elif value == 'default':
            if key in PARAM_TO_DEFAULT_KEYMAP:
                commands.append('{0} {1}'.format(key, PARAM_TO_DEFAULT_KEYMAP[key]))

            elif existing_commands.get(key):
                if key == 'table-map-filter':
                    default_tmf_command = get_default_table_map_filter(existing)
                    if default_tmf_command:
                        commands.extend(default_tmf_command)
                else:
                    existing_value = existing_commands.get(key)
                    default_command = default_existing(existing_value, key, value)
                    if default_command:
                        commands.extend(default_command)
        else:
            if key == 'network':
                network_commands = get_network_command(existing, key, value)
                if network_commands:
                    commands.extend(network_commands)

            elif key == 'inject-map':
                inject_map_commands = get_inject_map_command(existing, key, value)
                if inject_map_commands:
                    commands.extend(inject_map_commands)

            elif key == 'redistribute':
                redistribute_commands = get_redistribute_command(existing, key, value)
                if redistribute_commands:
                    commands.extend(redistribute_commands)

            else:
                commands.append('{0} {1}'.format(key, value))

    if commands:
        parents = ["router bgp {0}".format(module.params['asn'])]
        if module.params['vrf'] != 'default':
            parents.append('vrf {0}'.format(module.params['vrf']))

        if len(commands) == 1:
            candidate.add(commands, parents=parents)
        elif len(commands) > 1:
            parents.append('address-family {0} {1}'.format(module.params['afi'],
                                                           module.params['safi']))
            # NOTE(review): addr_family_command is only bound when
            # 'address-family' was in proposed_commands — presumably
            # always true since afi/safi are required; confirm.
            if addr_family_command in commands:
                commands.remove(addr_family_command)
            candidate.add(commands, parents=parents)
+
+
def state_absent(module, existing, proposed, candidate):
    """Queue removal of the whole address-family stanza."""
    parents = ["router bgp {0}".format(module.params['asn'])]
    if module.params['vrf'] != 'default':
        parents.append('vrf {0}'.format(module.params['vrf']))

    commands = ['no address-family {0} {1}'.format(
        module.params['afi'], module.params['safi'])]
    candidate.add(commands, parents=parents)
+
+
def main():
    """Module entry point: validate params, diff state and push config."""
    argument_spec = dict(
        asn=dict(required=True, type='str'),
        vrf=dict(required=False, type='str', default='default'),
        safi=dict(required=True, type='str', choices=['unicast', 'multicast', 'evpn']),
        afi=dict(required=True, type='str', choices=['ipv4', 'ipv6', 'vpnv4', 'vpnv6', 'l2vpn']),
        additional_paths_install=dict(required=False, type='bool'),
        additional_paths_receive=dict(required=False, type='bool'),
        additional_paths_selection=dict(required=False, type='str'),
        additional_paths_send=dict(required=False, type='bool'),
        advertise_l2vpn_evpn=dict(required=False, type='bool'),
        client_to_client=dict(required=False, type='bool'),
        dampen_igp_metric=dict(required=False, type='str'),
        dampening_state=dict(required=False, type='bool'),
        dampening_half_time=dict(required=False, type='str'),
        dampening_max_suppress_time=dict(required=False, type='str'),
        dampening_reuse_time=dict(required=False, type='str'),
        dampening_routemap=dict(required=False, type='str'),
        dampening_suppress_time=dict(required=False, type='str'),
        default_information_originate=dict(required=False, type='bool'),
        default_metric=dict(required=False, type='str'),
        distance_ebgp=dict(required=False, type='str'),
        distance_ibgp=dict(required=False, type='str'),
        distance_local=dict(required=False, type='str'),
        inject_map=dict(required=False, type='list'),
        maximum_paths=dict(required=False, type='str'),
        maximum_paths_ibgp=dict(required=False, type='str'),
        networks=dict(required=False, type='list'),
        next_hop_route_map=dict(required=False, type='str'),
        redistribute=dict(required=False, type='list'),
        suppress_inactive=dict(required=False, type='bool'),
        table_map=dict(required=False, type='str'),
        table_map_filter=dict(required=False, type='bool'),
        state=dict(choices=['present', 'absent'], default='present',
                   required=False),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                required_together=[DAMPENING_PARAMS,
                                                   ['distance_ibgp',
                                                    'distance_ebgp',
                                                    'distance_local']],
                                supports_check_mode=True)

    state = module.params['state']
    # dampening_routemap is mutually exclusive with the raw timers.
    if module.params['dampening_routemap']:
        for param in DAMPENING_PARAMS:
            if module.params[param]:
                module.fail_json(msg='dampening_routemap cannot be used with'
                                     ' the {0} param'.format(param))

    if module.params['advertise_l2vpn_evpn']:
        if module.params['vrf'] == 'default':
            module.fail_json(msg='It is not possible to advertise L2VPN '
                                 'EVPN in the default VRF. Please specify '
                                 'another one.', vrf=module.params['vrf'])

    if module.params['table_map_filter'] and not module.params['table_map']:
        module.fail_json(msg='table_map param is needed when using'
                             ' table_map_filter filter.')

    args = [
        "additional_paths_install",
        "additional_paths_receive",
        "additional_paths_selection",
        "additional_paths_send",
        "advertise_l2vpn_evpn",
        "afi",
        "asn",
        "client_to_client",
        "dampen_igp_metric",
        "dampening_half_time",
        "dampening_max_suppress_time",
        "dampening_reuse_time",
        "dampening_suppress_time",
        "dampening_routemap",
        "dampening_state",
        "default_information_originate",
        "default_metric",
        "distance_ebgp",
        "distance_ibgp",
        "distance_local",
        "inject_map",
        "maximum_paths",
        "maximum_paths_ibgp",
        "networks",
        "next_hop_route_map",
        "redistribute",
        "safi",
        "suppress_inactive",
        "table_map",
        "table_map_filter",
        "vrf"
    ]

    existing = invoke('get_existing', module, args)

    if existing.get('asn'):
        if (existing.get('asn') != module.params['asn'] and
                state == 'present'):
            module.fail_json(msg='Another BGP ASN already exists.',
                             proposed_asn=module.params['asn'],
                             existing_asn=existing.get('asn'))

    end_state = existing
    # .items() instead of the Python-2-only .iteritems() (works on both).
    proposed_args = dict((k, v) for k, v in module.params.items()
                         if v is not None and k in args)

    # a leading 'default' entry collapses the whole list param to 'default'
    if proposed_args.get('networks'):
        if proposed_args['networks'][0] == 'default':
            proposed_args['networks'] = 'default'
    if proposed_args.get('inject_map'):
        if proposed_args['inject_map'][0] == 'default':
            proposed_args['inject_map'] = 'default'

    proposed = {}
    for key, value in proposed_args.items():
        if key not in ['asn', 'vrf']:
            if str(value).lower() == 'default':
                value = PARAM_TO_DEFAULT_KEYMAP.get(key)
                if value is None:
                    value = 'default'
            if existing.get(key) or (not existing.get(key) and value):
                proposed[key] = value

    result = {}
    if state == 'present' or (state == 'absent' and existing):
        candidate = CustomNetworkConfig(indent=3)
        invoke('state_%s' % state, module, existing, proposed, candidate)

        try:
            response = load_config(module, candidate)
            result.update(response)
        except ShellError:
            exc = get_exception()
            module.fail_json(msg=str(exc))
    else:
        result['updates'] = []

    result['connected'] = module.connected
    if module._verbosity > 0:
        end_state = invoke('get_existing', module, args)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed_args

    if WARNINGS:
        result['warnings'] = WARNINGS
    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/network/nxos/nxos_bgp_neighbor.py b/network/nxos/nxos_bgp_neighbor.py
new file mode 100644
index 00000000000..f0cc6145819
--- /dev/null
+++ b/network/nxos/nxos_bgp_neighbor.py
@@ -0,0 +1,769 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
# Release metadata consumed by Ansible's module documentation tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_bgp_neighbor
+version_added: "2.2"
+short_description: Manages BGP neighbors configurations.
+description:
+ - Manages BGP neighbors configurations on NX-OS switches.
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - C(state=absent) removes the whole BGP neighbor configuration.
+ - Default, where supported, restores params default value.
+options:
+ asn:
+ description:
+ - BGP autonomous system number. Valid values are string,
+ Integer in ASPLAIN or ASDOT notation.
+ required: true
+ vrf:
+ description:
+ - Name of the VRF. The name 'default' is a valid VRF representing
+ the global bgp.
+ required: false
+ default: default
+ neighbor:
+ description:
+ - Neighbor Identifier. Valid values are string. Neighbors may use
+ IPv4 or IPv6 notation, with or without prefix length.
+ required: true
+ description:
+ description:
+ - Description of the neighbor.
+ required: false
+ default: null
+ connected_check:
+ description:
+ - Configure whether or not to check for directly connected peer.
+ required: false
+ choices: ['true', 'false']
+ default: null
+ capability_negotiation:
+ description:
+ - Configure whether or not to negotiate capability with
+ this neighbor.
+ required: false
+ choices: ['true', 'false']
+ default: null
+ dynamic_capability:
+ description:
+ - Configure whether or not to enable dynamic capability.
+ required: false
+ choices: ['true', 'false']
+ default: null
+ ebgp_multihop:
+ description:
+ - Specify multihop TTL for a remote peer. Valid values are
+ integers between 2 and 255, or keyword 'default' to disable
+ this property.
+ required: false
+ default: null
+ local_as:
+ description:
+ - Specify the local-as number for the eBGP neighbor.
+ Valid values are String or Integer in ASPLAIN or ASDOT notation,
+ or 'default', which means not to configure it.
+ required: false
+ default: null
+ log_neighbor_changes:
+ description:
+ - Specify whether or not to enable log messages for neighbor
+ up/down event.
+ required: false
+ choices: ['enable', 'disable', 'inherit']
+ default: null
+ low_memory_exempt:
+ description:
+ - Specify whether or not to shut down this neighbor under
+ memory pressure.
+ required: false
+ choices: ['true', 'false']
+ default: null
+ maximum_peers:
+ description:
+ - Specify Maximum number of peers for this neighbor prefix
+ Valid values are between 1 and 1000, or 'default', which does
+ not impose the limit.
+ required: false
+ default: null
+ pwd:
+ description:
+ - Specify the password for neighbor. Valid value is string.
+ required: false
+ default: null
+ pwd_type:
+ description:
+ - Specify the encryption type the password will use. Valid values
+ are '3des' or 'cisco_type_7' encryption.
+ required: false
+ choices: ['3des', 'cisco_type_7']
+ default: null
+ remote_as:
+ description:
+ - Specify Autonomous System Number of the neighbor.
+ Valid values are String or Integer in ASPLAIN or ASDOT notation,
+ or 'default', which means not to configure it.
+ required: false
+ default: null
+ remove_private_as:
+ description:
+ - Specify the config to remove private AS number from outbound
+ updates. Valid values are 'enable' to enable this config,
+ 'disable' to disable this config, 'all' to remove all
+ private AS number, or 'replace-as', to replace the private
+ AS number.
+ required: false
+ choices: ['enable', 'disable', 'all', 'replace-as']
+ default: null
+ shutdown:
+ description:
+ - Configure to administratively shutdown this neighbor.
+ required: false
+ choices: ['true','false']
+ default: null
+ suppress_4_byte_as:
+ description:
+ - Configure to suppress 4-byte AS Capability.
+ required: false
+ choices: ['true','false']
+ default: null
+ timers_keepalive:
+ description:
+ - Specify keepalive timer value. Valid values are integers
+ between 0 and 3600 in terms of seconds, or 'default',
+ which is 60.
+ required: false
+ default: null
+ timers_holdtime:
+ description:
+ - Specify holdtime timer value. Valid values are integers between
+ 0 and 3600 in terms of seconds, or 'default', which is 180.
+ required: false
+ default: null
+ transport_passive_only:
+ description:
+ - Specify whether or not to only allow passive connection setup.
+ Valid values are 'true', 'false', and 'default', which defaults
+ to 'false'. This property can only be configured when the
+ neighbor is in 'ip' address format without prefix length.
+ This property and the transport_passive_mode property are
+ mutually exclusive.
+ required: false
+ choices: ['true','false']
+ default: null
+ update_source:
+ description:
+ - Specify source interface of BGP session and updates.
+ required: false
+ default: null
+ state:
+ description:
+ - Determines whether the config should be present or not
+ on the device.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+EXAMPLES = '''
+# create a new neighbor
+- nxos_bgp_neighbor:
+ asn: 65535
+ neighbor: 3.3.3.3
+ local_as: 20
+ remote_as: 30
+ description: "just a description"
+ update_source: Ethernet1/3
+ shutdown: default
+ state: present
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"asn": "65535", "description": "just a description",
+ "local_as": "20", "neighbor": "3.3.3.3",
+ "remote_as": "30", "shutdown": "default",
+ "update_source": "Ethernet1/3", "vrf": "default"}
+existing:
+ description: k/v pairs of existing BGP neighbor configuration
+ returned: verbose mode
+ type: dict
+ sample: {}
+end_state:
+ description: k/v pairs of BGP neighbor configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"asn": "65535", "capability_negotiation": false,
+ "connected_check": false, "description": "just a description",
+ "dynamic_capability": true, "ebgp_multihop": "",
+ "local_as": "20", "log_neighbor_changes": "",
+ "low_memory_exempt": false, "maximum_peers": "",
+ "neighbor": "3.3.3.3", "pwd": "",
+ "pwd_type": "", "remote_as": "30",
+ "remove_private_as": "disable", "shutdown": false,
+ "suppress_4_byte_as": false, "timers_holdtime": "180",
+ "timers_keepalive": "60", "transport_passive_only": false,
+ "update_source": "Ethernet1/3", "vrf": "default"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["router bgp 65535", "neighbor 3.3.3.3",
+ "remote-as 30", "update-source Ethernet1/3",
+ "description just a description", "local-as 20"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+# COMMON CODE FOR MIGRATION
+import re
+
+import ansible.module_utils.nxos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.shell import ShellError
+
+
def to_list(val):
    """Coerce *val* into a list: sequences are copied, None becomes []."""
    if isinstance(val, (list, tuple)):
        return list(val)
    if val is None:
        return list()
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig extended with section lookup and hierarchical add()."""

    def expand_section(self, configobj, S=None):
        # Depth-first collection of a config line plus all of its children.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Find the config line whose text and full ancestry match *path*;
        # returns None when no such line exists.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of config line objects back into raw text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Raw text of the section at *path*, or an empty list if absent.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Line objects of the section at *path*; raises ValueError if absent.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more lines of configuration, optionally nested
        under the given *parents* hierarchy (created on demand).
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Return a network module object, preferring the legacy factory.

    Falls back to instantiating NetworkModule directly when the older
    get_module() helper is not defined in this namespace.
    """
    try:
        module = get_module(**kwargs)
    except NameError:
        module = NetworkModule(**kwargs)
    return module
+
def get_config(module, include_defaults=False):
    """Fetch the device configuration wrapped for section parsing.

    Prefers the 'config' module parameter when supplied; otherwise asks
    the module for its running config, falling back to the newer
    module.config API when the legacy get_config() method is absent.
    """
    config = module.params['config']
    if config:
        return CustomNetworkConfig(indent=2, contents=config)
    try:
        config = module.get_config()
    except AttributeError:
        config = module.config.get_config(
            include_defaults=module.params['include_defaults'])
    return CustomNetworkConfig(indent=2, contents=config)
+
def load_config(module, candidate):
    """Diff *candidate* against the device config and apply the delta.

    Returns a result dict with 'changed' and, when commands were sent,
    the list of 'updates'. Honors check mode and the 'save' parameter.
    """
    running = get_config(module)
    commands = [str(cmd).strip() for cmd in candidate.difference(running)]

    result = dict(changed=False)
    if not commands:
        return result

    if not module.check_mode:
        try:
            module.configure(commands)
        except AttributeError:
            # newer connection object exposes config() instead
            module.config(commands)
        if module.params['save']:
            try:
                module.config.save_config()
            except AttributeError:
                module.execute(['copy running-config startup-config'])

    result['changed'] = True
    result['updates'] = commands
    return result
+# END OF COMMON CODE
+
+
# Warning strings accumulated during the run; surfaced in the result.
WARNINGS = []
# Parameters that map to simple flag commands (command present => True).
BOOL_PARAMS = [
    'capability_negotiation',
    'shutdown',
    'connected_check',
    'dynamic_capability',
    'low_memory_exempt',
    'suppress_4_byte_as',
    'transport_passive_only'
]
# Maps each module parameter to the NX-OS command keyword that sets it.
PARAM_TO_COMMAND_KEYMAP = {
    'asn': 'router bgp',
    'capability_negotiation': 'dont-capability-negotiate',
    'connected_check': 'disable-connected-check',
    'description': 'description',
    'dynamic_capability': 'dynamic-capability',
    'ebgp_multihop': 'ebgp-multihop',
    'local_as': 'local-as',
    'log_neighbor_changes': 'log-neighbor-changes',
    'low_memory_exempt': 'low-memory exempt',
    'maximum_peers': 'maximum-peers',
    'neighbor': 'neighbor',
    'pwd': 'password',
    'pwd_type': 'password-type',
    'remote_as': 'remote-as',
    'remove_private_as': 'remove-private-as',
    'shutdown': 'shutdown',
    'suppress_4_byte_as': 'capability suppress 4-byte-as',
    'timers_keepalive': 'timers-keepalive',
    'timers_holdtime': 'timers-holdtime',
    'transport_passive_only': 'transport connection-mode passive',
    'update_source': 'update-source',
    'vrf': 'vrf'
}
# Device-side default values substituted when a param is set to 'default'.
PARAM_TO_DEFAULT_KEYMAP = {
    'shutdown': False,
    'dynamic_capability': True,
    'timers_keepalive': 60,
    'timers_holdtime': 180
}
+
def invoke(name, *args, **kwargs):
    """Call the module-level function *name* if defined, else return None."""
    target = globals().get(name)
    if not target:
        return None
    return target(*args, **kwargs)
+
+
def get_value(arg, config, module):
    """Extract the current value of `arg` from a neighbor config block.

    Boolean params: True iff the mapped command appears in `config`.
    Other params: the text following the mapped command ('' if absent).
    """
    if arg in BOOL_PARAMS:
        REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = False
        try:
            if REGEX.search(config):
                value = True
        except TypeError:
            # config was not a string (e.g. None); treat as not configured.
            value = False

    else:
        # Fix: the capture group must be NAMED 'value' for the
        # .group('value') call below; the bare '(?P' form is invalid.
        REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = ''
        if PARAM_TO_COMMAND_KEYMAP[arg] in config:
            value = REGEX.search(config).group('value')
    return value
+
+
def get_custom_value(arg, config, module):
    """Parse params that need bespoke handling from a neighbor config block.

    `config` is the raw text of the neighbor section; `module` is unused
    but kept so all value getters share one signature.
    """
    value = ''
    splitted_config = config.splitlines()

    if arg == 'log_neighbor_changes':
        for line in splitted_config:
            if 'log-neighbor-changes' in line:
                if 'disable' in line:
                    value = 'disable'
                else:
                    value = 'enable'

    elif arg == 'pwd':
        # 'password <type> <pwd>': third token is the password itself.
        for line in splitted_config:
            if 'password' in line:
                splitted_line = line.split()
                value = splitted_line[2]

    elif arg == 'pwd_type':
        for line in splitted_config:
            if 'password' in line:
                splitted_line = line.split()
                value = splitted_line[1]

    elif arg == 'remove_private_as':
        value = 'disable'
        for line in splitted_config:
            if 'remove-private-as' in line:
                splitted_line = line.split()
                if len(splitted_line) == 1:
                    value = 'enable'
                elif len(splitted_line) == 2:
                    value = splitted_line[1]

    elif arg == 'timers_keepalive':
        # Fix: the capture group must be NAMED 'value' for .group('value');
        # the bare '(?P' form raises a regex compile error.
        REGEX = re.compile(r'(?:timers\s)(?P<value>.*)$', re.M)
        value = ''
        if 'timers' in config:
            parsed = REGEX.search(config).group('value').split()
            value = parsed[0]

    elif arg == 'timers_holdtime':
        REGEX = re.compile(r'(?:timers\s)(?P<value>.*)$', re.M)
        value = ''
        if 'timers' in config:
            parsed = REGEX.search(config).group('value').split()
            if len(parsed) == 2:
                value = parsed[1]

    return value
+
+
def get_existing(module, args):
    """Collect the device's current settings for `args` into a dict.

    Returns {} (plus a WARNINGS entry) when no BGP process exists yet.
    """
    existing = {}
    netcfg = get_config(module)
    # Params whose values need bespoke parsing rather than the generic regex.
    custom = [
        'log_neighbor_changes',
        'pwd',
        'pwd_type',
        'remove_private_as',
        'timers_holdtime',
        'timers_keepalive'
    ]
    try:
        # Fix: the capture group must be NAMED so groupdict() exposes
        # 'existing_asn'; the bare '(?P' form is invalid.
        asn_regex = r'.*router\sbgp\s(?P<existing_asn>\d+).*'
        match_asn = re.match(asn_regex, str(netcfg), re.DOTALL)
        existing_asn_group = match_asn.groupdict()
        existing_asn = existing_asn_group['existing_asn']
    except AttributeError:
        # re.match returned None: no 'router bgp' stanza on the device.
        existing_asn = ''

    if existing_asn:
        parents = ["router bgp {0}".format(existing_asn)]
        if module.params['vrf'] != 'default':
            parents.append('vrf {0}'.format(module.params['vrf']))

        parents.append('neighbor {0}'.format(module.params['neighbor']))
        config = netcfg.get_section(parents)

        if config:
            for arg in args:
                if arg not in ['asn', 'vrf', 'neighbor']:
                    if arg in custom:
                        existing[arg] = get_custom_value(arg, config, module)
                    else:
                        existing[arg] = get_value(arg, config, module)

        existing['asn'] = existing_asn
        existing['neighbor'] = module.params['neighbor']
        existing['vrf'] = module.params['vrf']
    else:
        WARNINGS.append("The BGP process didn't exist but the task"
                        " just created it.")
    return existing
+
+
def apply_key_map(key_map, table):
    """Translate `table` keys through `key_map`, dropping unmapped keys.

    The original if/else assigned the same value in both branches (and
    re-fetched the value it already had); collapsed to one assignment.
    """
    new_dict = {}
    for key, value in table.items():
        new_key = key_map.get(key)
        if new_key:
            new_dict[new_key] = value
    return new_dict
+
+
def state_present(module, existing, proposed, candidate):
    """Queue the CLI commands that move the neighbor to the proposed state.

    Translates proposed/existing param dicts to CLI keywords, builds the
    per-key commands, and adds them to `candidate` under the proper
    router-bgp/vrf/neighbor parent hierarchy.
    """
    commands = list()
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)

    for key, value in proposed_commands.iteritems():
        # Booleans toggle the bare keyword on/off.
        if value is True:
            commands.append(key)

        elif value is False:
            commands.append('no {0}'.format(key))

        elif value == 'default':
            # Defaulting only needs a 'no' when something is configured.
            if existing_commands.get(key):
                existing_value = existing_commands.get(key)
                commands.append('no {0} {1}'.format(key, existing_value))
        else:
            if key == 'log-neighbor-changes':
                if value == 'enable':
                    commands.append('{0}'.format(key))
                elif value == 'disable':
                    commands.append('{0} {1}'.format(key, value))
                elif value == 'inherit':
                    if existing_commands.get(key):
                        commands.append('no {0}'.format(key))
            elif key == 'password':
                # NX-OS encodes 3des as type 3; everything else as type 7.
                pwd_type = module.params['pwd_type']
                if pwd_type == '3des':
                    pwd_type = 3
                else:
                    pwd_type = 7
                command = '{0} {1} {2}'.format(key, pwd_type, value)
                if command not in commands:
                    commands.append(command)
            elif key == 'remove-private-as':
                if value == 'enable':
                    command = '{0}'.format(key)
                    commands.append(command)
                elif value == 'disable':
                    if existing_commands.get(key) != 'disable':
                        command = 'no {0}'.format(key)
                        commands.append(command)
                else:
                    command = '{0} {1}'.format(key, value)
                    commands.append(command)
            elif key.startswith('timers'):
                # NOTE(review): assumes both timers params were proposed
                # together; a lone timers param would raise KeyError here —
                # confirm required_together enforces this upstream.
                command = 'timers {0} {1}'.format(
                    proposed_commands['timers-keepalive'],
                    proposed_commands['timers-holdtime'])
                if command not in commands:
                    commands.append(command)
            else:
                command = '{0} {1}'.format(key, value)
                commands.append(command)

    if commands:
        parents = ["router bgp {0}".format(module.params['asn'])]
        if module.params['vrf'] != 'default':
            parents.append('vrf {0}'.format(module.params['vrf']))

        parents.append('neighbor {0}'.format(module.params['neighbor']))

        # make sure that local-as is the last command in the list.
        local_as_command = 'local-as {0}'.format(module.params['local_as'])
        if local_as_command in commands:
            commands.remove(local_as_command)
            commands.append(local_as_command)
        candidate.add(commands, parents=parents)
+
+
def state_absent(module, existing, proposed, candidate):
    """Queue removal of the whole neighbor under its bgp/vrf parents.

    `existing` and `proposed` are unused; the signature mirrors
    state_present so both can be dispatched uniformly.
    """
    params = module.params
    parents = ["router bgp {0}".format(params['asn'])]
    if params['vrf'] != 'default':
        parents.append('vrf {0}'.format(params['vrf']))

    removal = ['no neighbor {0}'.format(params['neighbor'])]
    candidate.add(removal, parents=parents)
+
+
def main():
    """Module entry point: validate params, diff, and apply neighbor config."""
    argument_spec = dict(
        asn=dict(required=True, type='str'),
        vrf=dict(required=False, type='str', default='default'),
        neighbor=dict(required=True, type='str'),
        description=dict(required=False, type='str'),
        capability_negotiation=dict(required=False, type='bool'),
        connected_check=dict(required=False, type='bool'),
        dynamic_capability=dict(required=False, type='bool'),
        ebgp_multihop=dict(required=False, type='str'),
        local_as=dict(required=False, type='str'),
        log_neighbor_changes=dict(required=False, type='str', choices=['enable', 'disable', 'inherit']),
        low_memory_exempt=dict(required=False, type='bool'),
        maximum_peers=dict(required=False, type='str'),
        pwd=dict(required=False, type='str'),
        pwd_type=dict(required=False, type='str', choices=['cleartext', '3des', 'cisco_type_7', 'default']),
        remote_as=dict(required=False, type='str'),
        remove_private_as=dict(required=False, type='str', choices=['enable', 'disable', 'all', 'replace-as']),
        shutdown=dict(required=False, type='str'),
        suppress_4_byte_as=dict(required=False, type='bool'),
        timers_keepalive=dict(required=False, type='str'),
        timers_holdtime=dict(required=False, type='str'),
        transport_passive_only=dict(required=False, type='bool'),
        update_source=dict(required=False, type='str'),
        m_facts=dict(required=False, default=False, type='bool'),
        state=dict(choices=['present', 'absent'], default='present',
                   required=False),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    # Bugfix: required_together previously referenced 'timer_bgp_hold' /
    # 'timer_bgp_keepalive', names that do not exist in argument_spec, so
    # the constraint was never enforced. Use the real param names so both
    # timers must be supplied together (state_present builds a single
    # 'timers <keepalive> <holdtime>' command from both).
    module = get_network_module(argument_spec=argument_spec,
                                required_together=[['timers_keepalive',
                                                    'timers_holdtime']],
                                supports_check_mode=True)

    state = module.params['state']
    # 'default' password type maps to NX-OS encryption type 0.
    if module.params['pwd_type'] == 'default':
        module.params['pwd_type'] = '0'

    # Params mirrored between the device and the module result.
    args = [
        'asn',
        'capability_negotiation',
        'connected_check',
        'description',
        'dynamic_capability',
        'ebgp_multihop',
        'local_as',
        'log_neighbor_changes',
        'low_memory_exempt',
        'maximum_peers',
        'neighbor',
        'pwd',
        'pwd_type',
        'remote_as',
        'remove_private_as',
        'shutdown',
        'suppress_4_byte_as',
        'timers_keepalive',
        'timers_holdtime',
        'transport_passive_only',
        'update_source',
        'vrf'
    ]

    existing = invoke('get_existing', module, args)
    # Only one BGP process may exist; refuse to configure under another ASN.
    if existing.get('asn'):
        if (existing.get('asn') != module.params['asn'] and
            state == 'present'):
            module.fail_json(msg='Another BGP ASN already exists.',
                             proposed_asn=module.params['asn'],
                             existing_asn=existing.get('asn'))

    end_state = existing
    proposed_args = dict((k, v) for k, v in module.params.iteritems()
                    if v is not None and k in args)

    proposed = {}
    for key, value in proposed_args.iteritems():
        if key not in ['asn', 'vrf', 'neighbor', 'pwd_type']:
            # Resolve 'default' to the device default where one is known.
            if str(value).lower() == 'default':
                value = PARAM_TO_DEFAULT_KEYMAP.get(key)
                if value is None:
                    value = 'default'
        if existing.get(key) or (not existing.get(key) and value):
            proposed[key] = value

    result = {}
    if state == 'present' or (state == 'absent' and existing):
        candidate = CustomNetworkConfig(indent=3)
        invoke('state_%s' % state, module, existing, proposed, candidate)

        try:
            response = load_config(module, candidate)
            result.update(response)
        except ShellError:
            exc = get_exception()
            module.fail_json(msg=str(exc))
    else:
        result['updates'] = []

    result['connected'] = module.connected
    if module._verbosity > 0:
        end_state = invoke('get_existing', module, args)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed_args

    if WARNINGS:
        result['warnings'] = WARNINGS

    module.exit_json(**result)
diff --git a/network/nxos/nxos_bgp_neighbor_af.py b/network/nxos/nxos_bgp_neighbor_af.py
new file mode 100644
index 00000000000..e7d9ea6481e
--- /dev/null
+++ b/network/nxos/nxos_bgp_neighbor_af.py
@@ -0,0 +1,1122 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_bgp_neighbor_af
+version_added: "2.2"
+short_description: Manages BGP address-family's neighbors configuration.
+description:
+ - Manages BGP address-family's neighbors configurations on NX-OS switches.
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - C(state=absent) removes the whole BGP address-family's
+ neighbor configuration.
+ - Default, when supported, removes properties
+ - In order to default maximum-prefix configuration, only
+ C(max_prefix_limit=default) is needed.
+options:
+ asn:
+ description:
+ - BGP autonomous system number. Valid values are String,
+ Integer in ASPLAIN or ASDOT notation.
+ required: true
+ vrf:
+ description:
+ - Name of the VRF. The name 'default' is a valid VRF representing
+ the global bgp.
+ required: false
+ default: default
+ neighbor:
+ description:
+ - Neighbor Identifier. Valid values are string. Neighbors may use
+ IPv4 or IPv6 notation, with or without prefix length.
+ required: true
+ afi:
+ description:
+ - Address Family Identifier.
+ required: true
+ choices: ['ipv4','ipv6', 'vpnv4', 'vpnv6', 'l2vpn']
+ safi:
+ description:
+ - Sub Address Family Identifier.
+ required: true
+ choices: ['unicast','multicast', 'evpn']
+ additional_paths_receive:
+ description:
+ - Valid values are enable for basic command enablement; disable
+ for disabling the command at the neighbor af level
+ (it adds the disable keyword to the basic command); and inherit
+ to remove the command at this level (the command value is
+ inherited from a higher BGP layer).
+ required: false
+ choices: ['enable','disable', 'inherit']
+ default: null
+ additional_paths_send:
+ description:
+ - Valid values are enable for basic command enablement; disable
+ for disabling the command at the neighbor af level
+ (it adds the disable keyword to the basic command); and inherit
+ to remove the command at this level (the command value is
+ inherited from a higher BGP layer).
+ required: false
+ choices: ['enable','disable', 'inherit']
+ default: null
+ advertise_map_exist:
+ description:
+ - Conditional route advertisement. This property requires two
+ route maps, an advertise-map and an exist-map. Valid values are
+ an array specifying both the advertise-map name and the exist-map
+ name, or simply 'default' e.g. ['my_advertise_map',
+ 'my_exist_map']. This command is mutually exclusive with the
+ advertise_map_non_exist property.
+ required: false
+ default: null
+ advertise_map_non_exist:
+ description:
+ - Conditional route advertisement. This property requires two
+ route maps, an advertise-map and an exist-map. Valid values are
+ an array specifying both the advertise-map name and the
+ non-exist-map name, or simply 'default' e.g.
+ ['my_advertise_map', 'my_non_exist_map']. This command is mutually
+ exclusive with the advertise_map_exist property.
+ required: false
+ default: null
+ allowas_in:
+ description:
+ - Activate allowas-in property
+ required: false
+ default: null
+ allowas_in_max:
+ description:
+ - Optional max-occurrences value for allowas_in. Valid values are
+ an integer value or 'default'. Can be used independently or in
+ conjunction with allowas_in.
+ required: false
+ default: null
+ as_override:
+ description:
+ - Activate the as-override feature.
+ required: false
+ choices: ['true', 'false']
+ default: null
+ default_originate:
+ description:
+ - Activate the default-originate feature.
+ required: false
+ choices: ['true', 'false']
+ default: null
+ default_originate_route_map:
+ description:
+ - Optional route-map for the default_originate property. Can be
+ used independently or in conjunction with C(default_originate).
+ Valid values are a string defining a route-map name,
+ or 'default'.
+ required: false
+ default: null
+ filter_list_in:
+ description:
+ - Valid values are a string defining a filter-list name,
+ or 'default'.
+ required: false
+ default: null
+ filter_list_out:
+ description:
+ - Valid values are a string defining a filter-list name,
+ or 'default'.
+ required: false
+ default: null
+ max_prefix_limit:
+ description:
+ - maximum-prefix limit value. Valid values are an integer value
+ or 'default'.
+ required: false
+ default: null
+ max_prefix_interval:
+ description:
+ - Optional restart interval. Valid values are an integer.
+ Requires max_prefix_limit.
+ required: false
+ default: null
+ max_prefix_threshold:
+ description:
+ - Optional threshold percentage at which to generate a warning.
+ Valid values are an integer value.
+ Requires max_prefix_limit.
+ required: false
+ default: null
+ max_prefix_warning:
+ description:
+ - Optional warning-only keyword. Requires max_prefix_limit.
+ required: false
+ choices: ['true','false']
+ default: null
+ next_hop_self:
+ description:
+ - Activate the next-hop-self feature.
+ required: false
+ choices: ['true','false']
+ default: null
+ next_hop_third_party:
+ description:
+ - Activate the next-hop-third-party feature.
+ required: false
+ choices: ['true','false']
+ default: null
+ prefix_list_in:
+ description:
+ - Valid values are a string defining a prefix-list name,
+ or 'default'.
+ required: false
+ default: null
+ prefix_list_out:
+ description:
+ - Valid values are a string defining a prefix-list name,
+ or 'default'.
+ required: false
+ default: null
+ route_map_in:
+ description:
+ - Valid values are a string defining a route-map name,
+ or 'default'.
+ required: false
+ default: null
+ route_map_out:
+ description:
+ - Valid values are a string defining a route-map name,
+ or 'default'.
+ required: false
+ default: null
+ route_reflector_client:
+ description:
+ - Router reflector client.
+ required: false
+ choices: ['true','false']
+ default: null
+ send_community:
+ description:
+ - send-community attribute.
+ required: false
+ choices: ['none', 'both', 'extended', 'standard', 'default']
+ default: null
+ soft_reconfiguration_in:
+ description:
+ - Valid values are 'enable' for basic command enablement; 'always'
+ to add the always keyword to the basic command; and 'inherit' to
+ remove the command at this level (the command value is inherited
+ from a higher BGP layer).
+ required: false
+ choices: ['enable','always','inherit']
+ default: null
+ soo:
+ description:
+ - Site-of-origin. Valid values are a string defining a VPN
+ extcommunity or 'default'.
+ required: false
+ default: null
+ suppress_inactive:
+ description:
+ - suppress-inactive feature.
+ required: false
+ choices: ['true','false','default']
+ default: null
+ unsuppress_map:
+ description:
+ - unsuppress-map. Valid values are a string defining a route-map
+ name or 'default'.
+ required: false
+ default: null
+ weight:
+ description:
+ - Weight value. Valid values are an integer value or 'default'.
+ required: false
+ default: null
+ state:
+ description:
+ - Determines whether the config should be present or not
+ on the device.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+EXAMPLES = '''
+configure RR client
+- nxos_bgp_neighbor_af:
+ asn: 65535
+ neighbor: '3.3.3.3'
+ afi: ipv4
+ safi: unicast
+ route_reflector_client: true
+ state: present
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"afi": "ipv4", "asn": "65535",
+ "neighbor": "3.3.3.3", "route_reflector_client": true,
+ "safi": "unicast", "vrf": "default"}
+existing:
+ description: k/v pairs of existing configuration
+ returned: verbose mode
+ type: dict
+ sample: {}
+end_state:
+ description: k/v pairs of configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"additional_paths_receive": "inherit",
+ "additional_paths_send": "inherit",
+ "advertise_map_exist": [], "advertise_map_non_exist": [],
+ "afi": "ipv4", "allowas_in": false,
+ "allowas_in_max": "", "as_override": false,
+ "asn": "65535", "default_originate": false,
+ "default_originate_route_map": "", "filter_list_in": "",
+ "filter_list_out": "", "max_prefix_interval": "",
+ "max_prefix_limit": "", "max_prefix_threshold": "",
+ "max_prefix_warning": "", "neighbor": "3.3.3.3",
+ "next_hop_self": false, "next_hop_third_party": true,
+ "prefix_list_in": "", "prefix_list_out": "",
+ "route_map_in": "", "route_map_out": "",
+ "route_reflector_client": true, "safi": "unicast",
+ "send_community": "",
+ "soft_reconfiguration_in": "inherit", "soo": "",
+ "suppress_inactive": false, "unsuppress_map": "",
+ "vrf": "default", "weight": ""}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["router bgp 65535", "neighbor 3.3.3.3",
+ "address-family ipv4 unicast", "route-reflector-client"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+# COMMON CODE FOR MIGRATION
+import re
+
+import ansible.module_utils.nxos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.shell import ShellError
+
+
def to_list(val):
    """Coerce `val` to a list: copy sequences, wrap scalars, [] for None."""
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig extended with hierarchical section lookup and add().

    Items are ConfigLine objects whose parents/children model the CLI
    indentation hierarchy (see ansible.module_utils.netcfg).
    """

    def expand_section(self, configobj, S=None):
        """Return `configobj` plus all of its descendants, depth-first."""
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        """Find the item whose text and full parent chain match `path`."""
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        """Join a list of items back into raw config text."""
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        """Return the config text under `path`, or [] if it does not exist."""
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        """Return the items under `path`; raise ValueError if absent."""
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more config lines, creating missing parents as needed."""

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the module object, preferring legacy get_module() if defined."""
    try:
        return get_module(**kwargs)
    except NameError:
        # get_module only exists on older Ansible; use NetworkModule instead.
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device (or user-supplied) config as CustomNetworkConfig."""
    contents = module.params['config']
    if contents:
        return CustomNetworkConfig(indent=2, contents=contents)
    try:
        contents = module.get_config()
    except AttributeError:
        # Newer connection API: fetch via module.config instead.
        contents = module.config.get_config(
            include_defaults=module.params['include_defaults'])
    return CustomNetworkConfig(indent=2, contents=contents)
+
def load_config(module, candidate):
    """Diff `candidate` against the device config and push the delta.

    Returns a result dict with 'changed' and, when commands were sent,
    'updates' (the CLI lines). Honors check mode and the 'save' module
    param (copy running-config to startup-config).
    """
    config = get_config(module)

    # Only the candidate lines missing from the running config are sent.
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            # Different connection plugin generations expose different
            # APIs; fall back from configure() to config() on AttributeError.
            try:
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        # 'changed' is reported even in check mode when a diff exists.
        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
# Warnings accumulated by helpers and attached to the module result.
WARNINGS = []
# Params whose configured state is detected by mere presence of the command.
BOOL_PARAMS = [
    'allowas_in',
    'as_override',
    'default_originate',
    'next_hop_self',
    'next_hop_third_party',
    'route_reflector_client',
    'suppress_inactive'
]
# Maps each module param to the NX-OS CLI keyword that configures it.
PARAM_TO_COMMAND_KEYMAP = {
    'afi': 'address-family',
    'asn': 'router bgp',
    'neighbor': 'neighbor',
    'additional_paths_receive': 'capability additional-paths receive',
    'additional_paths_send': 'capability additional-paths send',
    'advertise_map_exist': 'advertise-map exist',
    'advertise_map_non_exist': 'advertise-map non-exist',
    'allowas_in': 'allowas-in',
    'allowas_in_max': 'allowas-in max',
    'as_override': 'as-override',
    'default_originate': 'default-originate',
    'default_originate_route_map': 'default-originate route-map',
    'filter_list_in': 'filter-list in',
    'filter_list_out': 'filter-list out',
    'max_prefix_limit': 'maximum-prefix',
    'max_prefix_interval': 'maximum-prefix options',
    'max_prefix_threshold': 'maximum-prefix options',
    'max_prefix_warning': 'maximum-prefix options',
    'next_hop_self': 'next-hop-self',
    'next_hop_third_party': 'next-hop-third-party',
    'prefix_list_in': 'prefix-list in',
    'prefix_list_out': 'prefix-list out',
    'route_map_in': 'route-map in',
    'route_map_out': 'route-map out',
    'route_reflector_client': 'route-reflector-client',
    'safi': 'address-family',
    'send_community': 'send-community',
    'soft_reconfiguration_in': 'soft-reconfiguration inbound',
    'soo': 'soo',
    'suppress_inactive': 'suppress-inactive',
    'unsuppress_map': 'unsuppress-map',
    'weight': 'weight',
    'vrf': 'vrf'
}
# No device-side defaults are substituted for this module.
PARAM_TO_DEFAULT_KEYMAP = {}
+
+
def invoke(name, *args, **kwargs):
    """Look up `name` among module globals and call it; None if undefined."""
    func = globals().get(name)
    return func(*args, **kwargs) if func else None
+
+
def get_value(arg, config, module):
    """Extract the current value of `arg` from an address-family block.

    Boolean params: True iff the mapped command appears in `config`.
    Other params: the text following the mapped command ('' if absent).
    """
    if arg in BOOL_PARAMS:
        REGEX = re.compile(r'\s+{0}\s*'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = False
        try:
            if REGEX.search(config):
                value = True
        except TypeError:
            # config was not a string (e.g. None); treat as not configured.
            value = False
    else:
        # Fix: the capture group must be NAMED 'value' for the
        # .group('value') call below; the bare '(?P' form is invalid.
        REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = ''
        if PARAM_TO_COMMAND_KEYMAP[arg] in config:
            value = REGEX.search(config).group('value')
    return value
+
+
def in_out_param(arg, config, module):
    """Extract the list/map name from an 'in'/'out' style command line.

    `arg` maps to a two-word command such as 'filter-list in'; the line
    '<base> <name> <direction>' yields <name> when direction matches.
    """
    base, direction = PARAM_TO_COMMAND_KEYMAP[arg].split()
    value = ''
    for line in config:
        if base in line:
            tokens = line.split()
            if tokens[-1] == direction:
                value = tokens[1]
    return value
+
+
def get_custom_value(arg, config, module):
    """Parse params needing bespoke handling from an address-family block.

    `config` is the raw section text; `module` is unused but kept so all
    value getters share one signature.
    """
    splitted_config = config.splitlines()
    value = ''

    if (arg.startswith('filter_list') or arg.startswith('prefix_list') or
            arg.startswith('route_map')):
        value = in_out_param(arg, splitted_config, module)
    elif arg == 'send_community':
        # Bare 'send-community' means 'none'; otherwise take the modifier.
        for line in splitted_config:
            if PARAM_TO_COMMAND_KEYMAP[arg] in line:
                splitted_line = line.split()
                if len(splitted_line) == 1:
                    value = 'none'
                else:
                    value = splitted_line[1]
    elif arg == 'additional_paths_receive':
        # Absent => inherited from a higher BGP layer.
        value = 'inherit'
        for line in splitted_config:
            if PARAM_TO_COMMAND_KEYMAP[arg] in line:
                if 'disable' in line:
                    value = 'disable'
                else:
                    value = 'enable'
    elif arg == 'additional_paths_send':
        value = 'inherit'
        for line in splitted_config:
            if PARAM_TO_COMMAND_KEYMAP[arg] in line:
                if 'disable' in line:
                    value = 'disable'
                else:
                    value = 'enable'
    elif arg == 'advertise_map_exist':
        # 'advertise-map <map> exist-map <map>' -> [advertise, exist].
        value = []
        for line in splitted_config:
            if 'advertise-map' in line and 'exist-map' in line:
                splitted_line = line.split()
                value = [splitted_line[1], splitted_line[3]]
    elif arg == 'advertise_map_non_exist':
        value = []
        for line in splitted_config:
            if 'advertise-map' in line and 'non-exist-map' in line:
                splitted_line = line.split()
                value = [splitted_line[1], splitted_line[3]]
    elif arg == 'allowas_in_max':
        for line in splitted_config:
            if 'allowas-in' in line:
                splitted_line = line.split()
                if len(splitted_line) == 2:
                    value = splitted_line[-1]
    elif arg.startswith('max_prefix'):
        # All max_prefix_* params are parsed from the one maximum-prefix line.
        for line in splitted_config:
            if 'maximum-prefix' in line:
                splitted_line = line.split()
                if arg == 'max_prefix_limit':
                    value = splitted_line[1]
                elif arg == 'max_prefix_interval' and 'restart' in line:
                    value = splitted_line[-1]
                elif arg == 'max_prefix_threshold' and len(splitted_line) > 2:
                    # Threshold is numeric; a non-numeric third token means
                    # it was a keyword (e.g. 'restart'/'warning-only').
                    try:
                        int(splitted_line[2])
                        value = splitted_line[2]
                    except ValueError:
                        value = ''
                elif arg == 'max_prefix_warning':
                    if 'warning-only' in line:
                        value = True
                    else:
                        value = False
    elif arg == 'soft_reconfiguration_in':
        value = 'inherit'
        for line in splitted_config:
            if PARAM_TO_COMMAND_KEYMAP[arg] in line:
                if 'always' in line:
                    value = 'always'
                else:
                    value = 'enable'
    elif arg == 'next_hop_third_party':
        # Explicit 'no' form wins over the bare command.
        PRESENT_REGEX = re.compile(r'\s+{0}\s*'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        ABSENT_REGEX = re.compile(r'\s+no\s+{0}\s*'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = False
        try:
            if ABSENT_REGEX.search(config):
                value = False
            elif PRESENT_REGEX.search(config):
                value = True
        except TypeError:
            value = False

    return value
+
+
def get_existing(module, args):
    """Collect the device's current neighbor-af settings for `args`.

    Returns {} (plus a WARNINGS entry) when no BGP process exists yet.
    """
    existing = {}
    netcfg = get_config(module)

    # Params whose values need bespoke parsing rather than the generic regex.
    custom = [
        'allowas_in_max',
        'send_community',
        'additional_paths_send',
        'additional_paths_receive',
        'advertise_map_exist',
        'advertise_map_non_exist',
        'filter_list_in',
        'filter_list_out',
        'max_prefix_limit',
        'max_prefix_interval',
        'max_prefix_threshold',
        'max_prefix_warning',
        'next_hop_third_party',
        'prefix_list_in',
        'prefix_list_out',
        'route_map_in',
        'route_map_out',
        'soft_reconfiguration_in'
    ]
    try:
        # Fix: the capture group must be NAMED so groupdict() exposes
        # 'existing_asn'; the bare '(?P' form is invalid.
        asn_regex = r'.*router\sbgp\s(?P<existing_asn>\d+).*'
        match_asn = re.match(asn_regex, str(netcfg), re.DOTALL)
        existing_asn_group = match_asn.groupdict()
        existing_asn = existing_asn_group['existing_asn']
    except AttributeError:
        # re.match returned None: no 'router bgp' stanza on the device.
        existing_asn = ''

    if existing_asn:
        parents = ["router bgp {0}".format(existing_asn)]
        if module.params['vrf'] != 'default':
            parents.append('vrf {0}'.format(module.params['vrf']))

        parents.append('neighbor {0}'.format(module.params['neighbor']))
        parents.append('address-family {0} {1}'.format(
            module.params['afi'], module.params['safi']))
        config = netcfg.get_section(parents)

        if config:
            for arg in args:
                if arg not in ['asn', 'vrf', 'neighbor', 'afi', 'safi']:
                    if arg in custom:
                        existing[arg] = get_custom_value(arg, config, module)
                    else:
                        existing[arg] = get_value(arg, config, module)

        existing['asn'] = existing_asn
        existing['neighbor'] = module.params['neighbor']
        existing['vrf'] = module.params['vrf']
        existing['afi'] = module.params['afi']
        existing['safi'] = module.params['safi']
    else:
        WARNINGS.append("The BGP process didn't exist but the task"
                        " just created it.")

    return existing
+
+
def apply_key_map(key_map, table):
    """Translate `table` keys through `key_map`, dropping unmapped keys.

    The original if/else assigned the same value in both branches (and
    re-fetched the value it already had); collapsed to one assignment.
    """
    new_dict = {}
    for key, value in table.items():
        new_key = key_map.get(key)
        if new_key:
            new_dict[new_key] = value
    return new_dict
+
+
def get_address_family_command(key, value, module):
    """Render the address-family line from the module's afi/safi params.

    `key` and `value` are unused; the signature matches the other
    per-command builders so they can be dispatched uniformly.
    """
    afi = module.params['afi']
    safi = module.params['safi']
    return 'address-family {0} {1}'.format(afi, safi)
+
+
def get_capability_additional_paths_receive_command(key, value, module):
    """Render additional-paths receive: bare command for 'enable',
    'disable' keyword appended for 'disable', '' otherwise."""
    if value == 'enable':
        return key
    if value == 'disable':
        return '{0} {1}'.format(key, value)
    return ''
+
+
def get_capability_additional_paths_send_command(key, value, module):
    """Render additional-paths send: bare command for 'enable',
    'disable' keyword appended for 'disable', '' otherwise."""
    if value == 'enable':
        return key
    if value == 'disable':
        return '{0} {1}'.format(key, value)
    return ''
+
+
def get_advertise_map_exist_command(key, value, module):
    """Render conditional advertisement; value is [advertise_map, exist_map]."""
    advertise_map, exist_map = value[0], value[1]
    return 'advertise-map {0} exist-map {1}'.format(advertise_map, exist_map)
+
+
def get_advertise_map_non_exist_command(key, value, module):
    """Render conditional advertisement; value is [advertise_map, non_exist_map]."""
    advertise_map, non_exist_map = value[0], value[1]
    return 'advertise-map {0} non-exist-map {1}'.format(advertise_map, non_exist_map)
+
+
def get_allowas_in_max_command(key, value, module):
    """Render allowas-in with its max-occurrences value."""
    return 'allowas-in {0}'.format(value)
+
+
def get_filter_list_in_command(key, value, module):
    """Render the inbound filter-list command for list name `value`."""
    return 'filter-list {0} in'.format(value)
+
+
def get_filter_list_out_command(key, value, module):
    """Render the outbound filter-list command for list name `value`."""
    return 'filter-list {0} out'.format(value)
+
+
def get_prefix_list_in_command(key, value, module):
    """Render the inbound prefix-list command for list name `value`."""
    return 'prefix-list {0} in'.format(value)
+
+
def get_prefix_list_out_command(key, value, module):
    """Render the outbound prefix-list command for list name `value`."""
    return 'prefix-list {0} out'.format(value)
+
+
def get_route_map_in_command(key, value, module):
    """Render the inbound route-map command for map name `value`."""
    return 'route-map {0} in'.format(value)
+
+
def get_route_map_out_command(key, value, module):
    """Render the outbound route-map command for map name `value`."""
    return 'route-map {0} out'.format(value)
+
+
def get_maximum_prefix_command(key, value, module):
    # The full maximum-prefix command is always rebuilt from all related
    # module params, so simply delegate to the options builder.
    return get_maximum_prefix_options_command(key, value, module)
+
+
def get_maximum_prefix_options_command(key, value, module):
    """Build 'maximum-prefix <limit> [threshold] [restart <i> | warning-only]'
    from the related module params (key/value are unused)."""
    command = 'maximum-prefix {0}'.format(module.params['max_prefix_limit'])
    if module.params['max_prefix_threshold']:
        command += ' {0}'.format(module.params['max_prefix_threshold'])
    if module.params['max_prefix_interval']:
        command += ' restart {0}'.format(module.params['max_prefix_interval'])
    elif module.params['max_prefix_warning']:
        # 'restart' and 'warning-only' are alternatives: warning-only is
        # only appended when no restart interval was supplied.
        command += ' warning-only'
    return command
+
+
def get_soft_reconfiguration_inbound_command(key, value, module):
    """Render soft-reconfiguration inbound: 'enable' -> bare command,
    'always' -> '<key> always', anything else -> no command."""
    if value == 'enable':
        return key
    if value == 'always':
        return '{0} {1}'.format(key, value)
    return ''
+
+
def get_default_command(key, value, existing_commands):
    """Build the command(s) that reset *key* to its default/inherited state.

    Returns a string ('' when nothing needs to be removed) or, for
    'allowas-in max', a two-command list.  Negation is only emitted when
    the key is currently configured (or is a boolean parameter).
    """
    current = existing_commands.get(key)

    if key == 'send-community' and current == 'none':
        return 'no {0}'.format(key)

    if not current:
        # Nothing configured: only boolean parameters can be negated blindly.
        if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
            return 'no {0}'.format(key)
        return ''

    if value == 'inherit':
        return 'no {0}'.format(key) if current != 'inherit' else ''

    # Simple one-operand commands share a 'no <cmd> <name> <dir>' shape.
    simple_templates = {
        'filter-list in': 'no filter-list {0} in',
        'filter-list out': 'no filter-list {0} out',
        'prefix-list in': 'no prefix-list {0} in',
        'prefix-list out': 'no prefix-list {0} out',
        'route-map in': 'no route-map {0} in',
        'route-map out': 'no route-map {0} out',
    }
    if key in simple_templates:
        return simple_templates[key].format(current)
    if key == 'advertise-map exist':
        return 'no advertise-map {0} exist-map {1}'.format(current[0],
                                                           current[1])
    if key == 'advertise-map non-exist':
        return 'no advertise-map {0} non-exist-map {1}'.format(current[0],
                                                               current[1])
    if key.startswith('maximum-prefix'):
        # both 'maximum-prefix' and 'maximum-prefix options' negate the base
        return 'no maximum-prefix {0}'.format(
            existing_commands.get('maximum-prefix'))
    if key == 'allowas-in max':
        # removing the cap still leaves plain allowas-in enabled
        return ['no allowas-in {0}'.format(current), 'allowas-in']
    return 'no {0} {1}'.format(key, current)
+
+
def fix_proposed(module, proposed):
    """Resolve the allowas_in / allowas_in_max overlap in *proposed*.

    When both are present, only one may be rendered: an explicit
    allowas_in=False wins over the cap, otherwise the cap wins.
    Mutates and returns *proposed*.
    """
    plain = proposed.get('allowas_in')
    capped = proposed.get('allowas_in_max')

    if plain is False and capped:
        del proposed['allowas_in_max']
    elif plain and capped:
        del proposed['allowas_in']

    return proposed
+
+
def state_present(module, existing, proposed, candidate):
    """Translate proposed vs. existing state into CLI commands and queue them.

    Commands are nested under 'router bgp <asn>' [/ 'vrf <vrf>'] /
    'neighbor <ip>' and, when more than one command is generated, under the
    address-family section as well.
    """
    commands = list()

    proposed = fix_proposed(module, proposed)

    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)

    # Keys whose rendering is delegated to a dedicated get_<key>_command()
    # helper instead of the generic '<key> <value>' form.
    custom = [
        'address-family',
        'capability additional-paths receive',
        'capability additional-paths send',
        'advertise-map exist',
        'advertise-map non-exist',
        'allowas-in max',
        'filter-list in',
        'filter-list out',
        'maximum-prefix',
        'maximum-prefix options',
        'prefix-list in',
        'prefix-list out',
        'route-map in',
        'route-map out',
        'soft-reconfiguration inbound'
    ]
    # .items() instead of the Python-2-only .iteritems() so the module also
    # runs under Python 3.
    for key, value in proposed_commands.items():
        if key == 'send-community' and value == 'none':
            commands.append('{0}'.format(key))

        elif value is True and key != 'maximum-prefix options':
            commands.append(key)

        elif value is False and key != 'maximum-prefix options':
            commands.append('no {0}'.format(key))

        elif value == 'default' or value == 'inherit':
            command = get_default_command(key, value, existing_commands)

            # get_default_command may return one command or a list
            # (e.g. for 'allowas-in max').
            if isinstance(command, str):
                if command and command not in commands:
                    commands.append(command)
            elif isinstance(command, list):
                for cmd in command:
                    if cmd not in commands:
                        commands.append(cmd)

        elif key in custom:
            fixed_key = key.replace(' ', '_').replace('-', '_')
            command = invoke('get_%s_command' % fixed_key, key, value, module)
            if command and command not in commands:
                commands.append(command)
        else:
            command = '{0} {1}'.format(key, value)
            commands.append(command)

    if commands:
        parents = ["router bgp {0}".format(module.params['asn'])]
        if module.params['vrf'] != 'default':
            parents.append('vrf {0}'.format(module.params['vrf']))

        parents.append('neighbor {0}'.format(module.params['neighbor']))

        if len(commands) == 1:
            candidate.add(commands, parents=parents)
        elif len(commands) > 1:
            # With multiple commands the address-family becomes a parent
            # section instead of a sibling command.
            af_command = 'address-family {0} {1}'.format(
                module.params['afi'], module.params['safi'])
            if af_command in commands:
                commands.remove(af_command)
            parents.append('address-family {0} {1}'.format(
                module.params['afi'], module.params['safi']))
            candidate.add(commands, parents=parents)
+
+
def state_absent(module, existing, proposed, candidate):
    """Queue removal of the neighbor's address-family under router bgp."""
    params = module.params

    parents = ['router bgp {0}'.format(params['asn'])]
    if params['vrf'] != 'default':
        parents.append('vrf {0}'.format(params['vrf']))
    parents.append('neighbor {0}'.format(params['neighbor']))

    commands = ['no address-family {0} {1}'.format(params['afi'],
                                                   params['safi'])]
    candidate.add(commands, parents=parents)
+
+
def main():
    """Module entry point: validate params, compute proposed state, apply it.

    Fixes vs. original: the two Python-2-only .iteritems() calls are now
    .items() (py2/py3 compatible), and the max_prefix error message names
    max_prefix_interval instead of repeating max_prefix_limit.
    """
    argument_spec = dict(
        asn=dict(required=True, type='str'),
        vrf=dict(required=False, type='str', default='default'),
        neighbor=dict(required=True, type='str'),
        afi=dict(required=True, type='str'),
        safi=dict(required=True, type='str'),
        additional_paths_receive=dict(required=False, type='str',
                                      choices=['enable', 'disable', 'inherit']),
        additional_paths_send=dict(required=False, type='str',
                                   choices=['enable', 'disable', 'inherit']),
        advertise_map_exist=dict(required=False, type='list'),
        advertise_map_non_exist=dict(required=False, type='list'),
        allowas_in=dict(required=False, type='bool'),
        allowas_in_max=dict(required=False, type='str'),
        as_override=dict(required=False, type='bool'),
        default_originate=dict(required=False, type='bool'),
        default_originate_route_map=dict(required=False, type='str'),
        filter_list_in=dict(required=False, type='str'),
        filter_list_out=dict(required=False, type='str'),
        max_prefix_limit=dict(required=False, type='str'),
        max_prefix_interval=dict(required=False, type='str'),
        max_prefix_threshold=dict(required=False, type='str'),
        max_prefix_warning=dict(required=False, type='bool'),
        next_hop_self=dict(required=False, type='bool'),
        next_hop_third_party=dict(required=False, type='bool'),
        prefix_list_in=dict(required=False, type='str'),
        prefix_list_out=dict(required=False, type='str'),
        route_map_in=dict(required=False, type='str'),
        route_map_out=dict(required=False, type='str'),
        route_reflector_client=dict(required=False, type='bool'),
        send_community=dict(required=False, choices=['none',
                                                     'both',
                                                     'extended',
                                                     'standard',
                                                     'default']),
        soft_reconfiguration_in=dict(required=False, type='str',
                                     choices=['enable', 'always', 'inherit']),
        soo=dict(required=False, type='str'),
        suppress_inactive=dict(required=False, type='bool'),
        unsuppress_map=dict(required=False, type='str'),
        weight=dict(required=False, type='str'),
        state=dict(choices=['present', 'absent'], default='present',
                   required=False),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                mutually_exclusive=[['advertise_map_exist',
                                                     'advertise_map_non_exist']],
                                supports_check_mode=True)

    state = module.params['state']
    # max_prefix_limit anchors the maximum-prefix command; the secondary
    # options are meaningless without it.
    if ((module.params['max_prefix_interval'] or
            module.params['max_prefix_warning'] or
            module.params['max_prefix_threshold']) and
            not module.params['max_prefix_limit']):
        module.fail_json(msg='max_prefix_limit is required when using '
                             'max_prefix_warning, max_prefix_interval or '
                             'max_prefix_threshold.')
    if module.params['vrf'] == 'default' and module.params['soo']:
        module.fail_json(msg='SOO is only allowed in non-default VRF')

    args = [
        'afi',
        'asn',
        'neighbor',
        'additional_paths_receive',
        'additional_paths_send',
        'advertise_map_exist',
        'advertise_map_non_exist',
        'allowas_in',
        'allowas_in_max',
        'as_override',
        'default_originate',
        'default_originate_route_map',
        'filter_list_in',
        'filter_list_out',
        'max_prefix_limit',
        'max_prefix_interval',
        'max_prefix_threshold',
        'max_prefix_warning',
        'next_hop_self',
        'next_hop_third_party',
        'prefix_list_in',
        'prefix_list_out',
        'route_map_in',
        'route_map_out',
        'soft_reconfiguration_in',
        'soo',
        'suppress_inactive',
        'unsuppress_map',
        'weight',
        'route_reflector_client',
        'safi',
        'send_community',
        'vrf'
    ]

    existing = invoke('get_existing', module, args)
    if existing.get('asn'):
        if (existing.get('asn') != module.params['asn'] and
                state == 'present'):
            module.fail_json(msg='Another BGP ASN already exists.',
                             proposed_asn=module.params['asn'],
                             existing_asn=existing.get('asn'))

    # normalize single-item ['default'] lists to the scalar keyword
    if module.params['advertise_map_exist'] == ['default']:
        module.params['advertise_map_exist'] = 'default'
    if module.params['advertise_map_non_exist'] == ['default']:
        module.params['advertise_map_non_exist'] = 'default'

    end_state = existing
    # .items() instead of .iteritems() keeps this working on Python 3.
    proposed_args = dict((k, v) for k, v in module.params.items()
                         if v is not None and k in args)

    proposed = {}
    for key, value in proposed_args.items():
        if key not in ['asn', 'vrf', 'neighbor']:
            if not isinstance(value, list):
                if str(value).lower() == 'true':
                    value = True
                elif str(value).lower() == 'false':
                    value = False
                elif str(value).lower() == 'default':
                    value = PARAM_TO_DEFAULT_KEYMAP.get(key)
                    if value is None:
                        if key in BOOL_PARAMS:
                            value = False
                        else:
                            value = 'default'
            if existing.get(key) or (not existing.get(key) and value):
                proposed[key] = value

    result = {}
    if state == 'present' or (state == 'absent' and existing):
        candidate = CustomNetworkConfig(indent=3)
        invoke('state_%s' % state, module, existing, proposed, candidate)

        try:
            response = load_config(module, candidate)
            result.update(response)
        except ShellError:
            exc = get_exception()
            module.fail_json(msg=str(exc))
    else:
        result['updates'] = []

    result['connected'] = module.connected
    if module._verbosity > 0:
        end_state = invoke('get_existing', module, args)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed_args

    if WARNINGS:
        result['warnings'] = WARNINGS

    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/network/nxos/nxos_command.py b/network/nxos/nxos_command.py
index 5ffcb908239..b90034a09e6 100644
--- a/network/nxos/nxos_command.py
+++ b/network/nxos/nxos_command.py
@@ -16,6 +16,10 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: nxos_command
@@ -93,10 +97,10 @@
password: admin
transport: cli
-- name: run show verion on remote devices
+- name: run show version on remote devices
nxos_command:
commands: show version
- provider "{{ cli }}"
+ provider: "{{ cli }}"
- name: run show version and check to see if output contains Cisco
nxos_command:
@@ -111,7 +115,7 @@
- show interfaces
provider: "{{ cli }}"
-- name: run multiple commands and evalute the output
+- name: run multiple commands and evaluate the output
nxos_command:
commands:
- show version
@@ -144,14 +148,18 @@
failed_conditions:
description: the conditionals that failed
- retured: failed
+ returned: failed
type: list
sample: ['...', '...']
"""
+import ansible.module_utils.nxos
+
from ansible.module_utils.basic import get_exception
-from ansible.module_utils.netcli import CommandRunner, FailedConditionsError
-from ansible.module_utils.netcli import AddCommandError
-from ansible.module_utils.nxos import NetworkModule, NetworkError
+from ansible.module_utils.network import NetworkModule, NetworkError
+from ansible.module_utils.netcli import CommandRunner
+from ansible.module_utils.netcli import FailedConditionsError
+from ansible.module_utils.netcli import FailedConditionalError
+from ansible.module_utils.netcli import AddCommandError, AddConditionError
VALID_KEYS = ['command', 'output', 'prompt', 'response']
@@ -186,7 +194,6 @@ def main():
)
module = NetworkModule(argument_spec=spec,
- connect_on_load=False,
supports_check_mode=True)
commands = list(parse_commands(module))
@@ -211,8 +218,12 @@ def main():
exc = get_exception()
warnings.append('duplicate command detected: %s' % cmd)
- for item in conditionals:
- runner.add_conditional(item)
+ try:
+ for item in conditionals:
+ runner.add_conditional(item)
+ except AddConditionError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), condition=exc.condition)
runner.retries = module.params['retries']
runner.interval = module.params['interval']
@@ -223,9 +234,12 @@ def main():
except FailedConditionsError:
exc = get_exception()
module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
+ except FailedConditionalError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), failed_conditional=exc.failed_conditional)
except NetworkError:
exc = get_exception()
- module.fail_json(msg=str(exc))
+ module.fail_json(msg=str(exc), **exc.kwargs)
result = dict(changed=False)
diff --git a/network/nxos/nxos_config.py b/network/nxos/nxos_config.py
index b7e40dd1edc..2355ec37d89 100644
--- a/network/nxos/nxos_config.py
+++ b/network/nxos/nxos_config.py
@@ -16,6 +16,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: nxos_config
@@ -53,7 +57,7 @@
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
- This arugment is mutually exclusive with the I(lines) and
+ This argument is mutually exclusive with the I(lines) and
I(parents) arguments.
required: false
default: null
@@ -70,7 +74,7 @@
after:
description:
- The ordered set of commands to append to the end of the command
- stack if a changed needs to be made. Just like with I(before) this
+ stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
@@ -81,11 +85,10 @@
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
- to position. Finally if match is set to I(exact), command lines
- must be an equal match.
- - Version 2.2 added a new choice I(none). When match is set to
- none, the configure is loaded into the remote device without
- consulting the configuration.
+ to position. If match is set to I(exact), command lines
+ must be an equal match. Finally, if match is set to I(none), the
+ module will not attempt to compare the source configuration with
+ the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
@@ -107,23 +110,21 @@
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
- the equivalient, set the match argument to none. This argument
+ the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: [ "true", "false" ]
- update:
+ backup:
description:
- - The I(update) argument controls how the configuration statements
- are processed on the remote device. Valid choices for the I(update)
- argument are I(merge) and I(check). When the argument is set to
- I(merge), the configuration changes are merged with the current
- device running configuration. When the argument is set to I(check)
- the configuration updates are determined but not actually configured
- on the remote device.
+ - This argument will cause the module to create a full backup of
+ the current C(running-config) from the remote device before any
+ changes are made. The backup file is written to the C(backup)
+ folder in the playbook root directory. If the directory does not
+ exist, it is created.
required: false
- default: merge
- choices: ['merge', 'check']
+ default: no
+ choices: ['yes', 'no']
version_added: "2.2"
config:
description:
@@ -148,7 +149,7 @@
version_added: "2.2"
save:
description:
- - The I(save) argument will instruct the module to save the
+ - The C(save) argument instructs the module to save the
running-config to startup-config. This operation is performed
after any changes are made to the current running config. If
no changes are made, the configuration is still saved to the
@@ -157,19 +158,8 @@
required: false
default: false
version_added: "2.2"
- state:
- description:
- - The I(state) argument specifies the state of the config
- file on the device. When set to present, the configuration
- is updated based on the values of the module. When the value
- is set to absent, the device startup config is erased.
- required: true
- default: present
- choices: ['present', 'absent']
- version_added: "2.2"
"""
-
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
@@ -213,26 +203,22 @@
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
- returned: always
+ returned: Only when lines is specified.
type: list
sample: ['...', '...']
+backup_path:
+ description: The full path to the backup file
+ returned: when backup is yes
+ type: path
+ sample: /playbooks/ansible/backup/nxos_config.2016-07-16@22:28:34
"""
-import time
-from ansible.module_utils.netcfg import NetworkConfig, dumps
-from ansible.module_utils.nxos import NetworkModule, NetworkError
+import ansible.module_utils.nxos
from ansible.module_utils.basic import get_exception
-
-def invoke(name, *args, **kwargs):
- func = globals().get(name)
- if func:
- return func(*args, **kwargs)
+from ansible.module_utils.network import NetworkModule, NetworkError
+from ansible.module_utils.netcfg import NetworkConfig, dumps
def check_args(module, warnings):
- if module.params['save'] and module.check_mode:
- warnings.append('will not save configuration due to checkmode')
- if module.params['parents'] and module.params['src']:
- warnings.append('ignoring parents argument when src specified')
if module.params['force']:
warnings.append('The force argument is deprecated, please use '
'match=none instead. This argument will be '
@@ -247,102 +233,59 @@ def get_candidate(module):
candidate.add(module.params['lines'], parents=parents)
return candidate
-def get_config(module, result):
- defaults = module.params['defaults']
- if defaults is True:
- key = '__configall__'
- else:
- key = '__config__'
-
- contents = module.params['config'] or result.get(key)
-
+def get_config(module):
+ contents = module.params['config']
if not contents:
+ defaults = module.params['defaults']
contents = module.config.get_config(include_defaults=defaults)
- result[key] = contents
-
- return NetworkConfig(indent=1, contents=contents)
-
-def backup_config(module, result):
- if '__config__' not in result:
- result['__config__'] = module.config.get_config()
- result['__backup__'] = result['__config__']
-
-def load_config(module, commands, result):
- if not module.check_mode:
- checkpoint = 'ansible_%s' % int(time.time())
- module.cli(['checkpoint %s' % checkpoint], output='text')
- result['__checkpoint__'] = checkpoint
- module.config.load_config(commands)
- result['changed'] = True
-
-def load_checkpoint(module, result):
- try:
- checkpoint = result['__checkpoint__']
- module.cli(['rollback running-config checkpoint %s' % checkpoint,
- 'no checkpoint %s' % checkpoint], output='text')
- except KeyError:
- module.fail_json(msg='unable to rollback, checkpoint not found')
- except NetworkError:
- exc = get_exception()
- msg = 'unable to rollback configuration'
- module.fail_json(msg=msg, checkpoint=checkpoint, **exc.kwargs)
+ return NetworkConfig(indent=2, contents=contents)
-def present(module, result):
+def run(module, result):
match = module.params['match']
replace = module.params['replace']
- update = module.params['update']
candidate = get_candidate(module)
if match != 'none':
- config = get_config(module, result)
- configobjs = candidate.difference(config, match=match, replace=replace)
+ config = get_config(module)
+ path = module.params['parents']
+ configobjs = candidate.difference(config, path=path, match=match,
+ replace=replace)
else:
- config = None
configobjs = candidate.items
- if module.params['backup']:
- backup_config(module, result)
-
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
- result['updates'] = commands
- if module.params['before']:
- commands[:0] = module.params['before']
+ if module.params['lines']:
+ if module.params['before']:
+ commands[:0] = module.params['before']
- if module.params['after']:
- commands.extend(module.params['after'])
+ if module.params['after']:
+ commands.extend(module.params['after'])
- # if the update mode is set to check just return
- # and do not try to load into the system
- if update != 'check':
- load_config(module, commands, result)
+ result['updates'] = commands
- # remove the checkpoint file used to restore the config
- # in case of an error
if not module.check_mode:
- module.cli('no checkpoint %s' % result['__checkpoint__'])
+ module.config.load_config(commands)
- if module.params['save'] and not module.check_mode:
- module.config.save_config()
result['changed'] = True
-def absent(module, result):
- if not module.check_mode:
- module.cli('write erase')
- result['changed'] = True
+ if module.params['save']:
+ if not module.check_mode:
+ module.config.save_config()
+ result['changed'] = True
def main():
""" main entry point for module execution
"""
argument_spec = dict(
+ src=dict(type='path'),
+
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
- src=dict(type='path'),
-
before=dict(type='list'),
after=dict(type='list'),
@@ -353,26 +296,25 @@ def main():
# it will be removed in a future version
force=dict(default=False, type='bool'),
- update=dict(choices=['merge', 'check'], default='merge'),
- backup=dict(type='bool', default=False),
-
config=dict(),
defaults=dict(type='bool', default=False),
+ backup=dict(type='bool', default=False),
save=dict(type='bool', default=False),
-
- state=dict(default='present', choices=['absent', 'present'])
)
mutually_exclusive = [('lines', 'src')]
+ required_if = [('match', 'strict', ['lines']),
+ ('match', 'exact', ['lines']),
+ ('replace', 'block', ['lines'])]
+
module = NetworkModule(argument_spec=argument_spec,
connect_on_load=False,
mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
supports_check_mode=True)
- state = module.params['state']
-
if module.params['force'] is True:
module.params['match'] = 'none'
@@ -381,14 +323,17 @@ def main():
result = dict(changed=False, warnings=warnings)
+ if module.params['backup']:
+ result['__backup__'] = module.config.get_config()
+
try:
- invoke(state, module, result)
+ run(module, result)
except NetworkError:
- load_checkpoint(module, result)
exc = get_exception()
module.fail_json(msg=str(exc), **exc.kwargs)
module.exit_json(**result)
+
if __name__ == '__main__':
main()
diff --git a/network/nxos/nxos_evpn_global.py b/network/nxos/nxos_evpn_global.py
new file mode 100644
index 00000000000..375269e0709
--- /dev/null
+++ b/network/nxos/nxos_evpn_global.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_evpn_global
+version_added: "2.2"
+short_description: Handles the EVPN control plane for VXLAN.
+description:
+ - Handles the EVPN control plane for VXLAN.
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+options:
+ nv_overlay_evpn:
+ description:
+ - EVPN control plane.
+ required: true
+ choices: ['true', 'false']
+'''
+EXAMPLES = '''
+- nxos_evpn_global:
+ nv_overlay_evpn: true
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"nv_overlay_evpn": true}
+existing:
+ description: k/v pairs of existing configuration
+ returned: verbose mode
+ type: dict
+ sample: {"nv_overlay_evpn": false}
+end_state:
+ description: k/v pairs of configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"nv_overlay_evpn": true}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["nv overlay evpn"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+# COMMON CODE FOR MIGRATION
+import re
+
+import ansible.module_utils.nxos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.shell import ShellError
+
+
def to_list(val):
    """Normalize *val* to a list: sequences are copied, None becomes [],
    any other scalar is wrapped in a single-element list."""
    if val is None:
        return list()
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig extended with section expansion and a hierarchy-aware
    add() used by the migrated nxos modules."""

    def expand_section(self, configobj, S=None):
        # Collect configobj and all of its descendants, depth-first, into S.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Find the item whose text matches the last path component AND whose
        # full parent chain matches the leading components; None if absent.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Join the raw (indented) lines of a section into one text block.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # NOTE(review): returns a string on success but an empty *list* when
        # the path is missing -- callers must cope with both.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Return the ConfigLine objects of the section rooted at *path*.
        # Raises ValueError when the path is not present in the config.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more lines of configuration, optionally nested under
        the given list of parent commands (created on demand).
        """

        ancestors = list()
        offset = 0
        obj = None

        # global config command: no parents, append flat items
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            # walk/create the parent hierarchy one level at a time
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config; raw is indented by nesting depth
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    # Prefer the legacy get_module() factory when the surrounding runtime
    # provides it; otherwise fall back to NetworkModule.  The NameError
    # probe is deliberate: get_module only exists on older module_utils.
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device configuration as a CustomNetworkConfig.

    The 'config' module parameter, when supplied, overrides fetching from
    the device.  Falls back from module.get_config() to
    module.config.get_config() for older/newer module_utils.
    """
    contents = module.params['config']
    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            contents = module.config.get_config(
                include_defaults=module.params['include_defaults'])
    return CustomNetworkConfig(indent=2, contents=contents)
+
def load_config(module, candidate):
    """Diff *candidate* against the running config and push the delta.

    Returns a result dict with 'changed' and, when commands were generated,
    'updates'.  In check mode nothing is sent but 'changed' still reflects
    whether a delta exists.
    """
    running = get_config(module)
    updates = [str(cmd).strip() for cmd in candidate.difference(running)]

    result = dict(changed=False)
    if not updates:
        return result

    if not module.check_mode:
        # older/newer module_utils expose different entry points
        try:
            module.configure(updates)
        except AttributeError:
            module.config(updates)

        if module.params['save']:
            try:
                module.config.save_config()
            except AttributeError:
                module.execute(['copy running-config startup-config'])

    result['changed'] = True
    result['updates'] = updates
    return result
+# END OF COMMON CODE
+
# Maps module argument names to the NX-OS CLI command they render to.
PARAM_TO_COMMAND_KEYMAP = {
    'nv_overlay_evpn': 'nv overlay evpn',
}
+
+
def invoke(name, *args, **kwargs):
    """Call the module-level function *name* if it exists; else return None."""
    func = globals().get(name)
    return func(*args, **kwargs) if func else None
+
+
def get_value(arg, config, module):
    """Return True when the CLI command mapped to *arg* appears in *config*.

    Fix: the pattern had been corrupted to '(?P.*)' which is invalid regex
    syntax (re.compile raises); the named group '(?P<value>.*)' is restored.
    """
    REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(
        PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
    value = False
    if REGEX.search(config):
        value = True
    return value
+
+
def get_existing(module):
    """Return the device's current nv_overlay_evpn state as a dict."""
    config = str(get_config(module))
    return {'nv_overlay_evpn': get_value('nv_overlay_evpn', config, module)}
+
+
def apply_key_map(key_map, table):
    """Rename *table*'s keys via *key_map*, dropping unmapped keys.

    Values are carried over unchanged.  Fix: the original if/else assigned
    the identical value in both branches, so the dead branch was collapsed.
    """
    new_dict = {}
    for key, value in table.items():
        new_key = key_map.get(key)
        if new_key:
            new_dict[new_key] = value
    return new_dict
+
+
def get_commands(module, existing, proposed, candidate):
    """Queue the enable/disable command for each proposed boolean toggle.

    Fixes: Python-2-only .iteritems() replaced by .items(); the unused
    existing_commands computation was removed (its result was never read).
    """
    commands = list()
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)

    for key, value in proposed_commands.items():
        if value is True:
            commands.append(key)
        elif value is False:
            commands.append('no {0}'.format(key))

    if commands:
        candidate.add(commands, parents=[])
+
+
def main():
    """Entry point: read nv_overlay_evpn, diff against the device, apply."""
    argument_spec = dict(
        nv_overlay_evpn=dict(required=True, type='bool'),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    existing = invoke('get_existing', module)
    end_state = existing
    proposed = dict(nv_overlay_evpn=module.params['nv_overlay_evpn'])

    result = {}
    candidate = CustomNetworkConfig(indent=3)
    invoke('get_commands', module, existing, proposed, candidate)

    if proposed != existing:
        # only push when the desired state differs from the device state
        try:
            response = load_config(module, candidate)
            result.update(response)
        except ShellError:
            exc = get_exception()
            module.fail_json(msg=str(exc))
    else:
        result['updates'] = []

    result['connected'] = module.connected
    if module._verbosity > 0:
        end_state = invoke('get_existing', module)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed

    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/network/nxos/nxos_evpn_vni.py b/network/nxos/nxos_evpn_vni.py
new file mode 100644
index 00000000000..5d20addd63a
--- /dev/null
+++ b/network/nxos/nxos_evpn_vni.py
@@ -0,0 +1,492 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_evpn_vni
+version_added: "2.2"
+short_description: Manages Cisco EVPN VXLAN Network Identifier (VNI).
+description:
+ - Manages Cisco Ethernet Virtual Private Network (EVPN) VXLAN Network
+ Identifier (VNI) configurations of a Nexus device.
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - default, where supported, restores params default value.
+ - RD override is not permitted. You should set it to the default values
+ first and then reconfigure it.
+ - C(route_target_both), C(route_target_import) and
+ C(route_target_export) valid values are a list of extended communities,
+ (i.e. ['1.2.3.4:5', '33:55']) or the keywords 'auto' or 'default'.
+ - The C(route_target_both) property is discouraged due to the inconsistent
+ behavior of the property across Nexus platforms and image versions.
+ For this reason it is recommended to use explicit C(route_target_export)
+ and C(route_target_import) properties instead of C(route_target_both).
+ - RD valid values are a string in one of the route-distinguisher formats,
+ the keyword 'auto', or the keyword 'default'.
+options:
+ vni:
+ description:
+ - The EVPN VXLAN Network Identifier.
+ required: true
+ default: null
+ route_distinguisher:
+ description:
+ - The VPN Route Distinguisher (RD). The RD is combined with
+ the IPv4 or IPv6 prefix learned by the PE router to create a
+ globally unique address.
+ required: true
+ default: null
+ route_target_both:
+ description:
+ - Enables/Disables route-target settings for both import and
+ export target communities using a single property.
+ required: false
+ default: null
+ route_target_import:
+ description:
+ - Sets the route-target 'import' extended communities.
+ required: false
+ default: null
+ route_target_export:
+ description:
+ - Sets the route-target 'export' extended communities.
+ required: false
+ default: null
+ state:
+ description:
+ - Determines whether the config should be present or not
+ on the device.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+EXAMPLES = '''
+- nxos_evpn_vni:
+ vni: 6000
+ route_distinguisher: "60:10"
+ route_target_import:
+ - "5000:10"
+ - "4100:100"
+ route_target_export: auto
+ route_target_both: default
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"route_target_import": ["5000:10", "4100:100",
+ "5001:10"],"vni": "6000"}
+existing:
+ description: k/v pairs of existing EVPN VNI configuration
+ returned: verbose mode
+ type: dict
+ sample: {"route_distinguisher": "70:10", "route_target_both": [],
+ "route_target_export": [], "route_target_import": [
+ "4100:100", "5000:10"], "vni": "6000"}
+end_state:
+ description: k/v pairs of EVPN VNI configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"route_distinguisher": "70:10", "route_target_both": [],
+ "route_target_export": [], "route_target_import": [
+ "4100:100", "5000:10", "5001:10"], "vni": "6000"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["evpn", "vni 6000 l2", "route-target import 5001:10"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+# COMMON CODE FOR MIGRATION
+import re
+
+import ansible.module_utils.nxos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+from ansible.module_utils.network import NetworkModule
+
+
def to_list(val):
    """Normalize *val* to a list.

    Lists/tuples are copied, None becomes an empty list, and any other
    value is wrapped in a single-element list.
    """
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass with section-aware helpers used by the
    migrated NX-OS modules (section lookup, expansion, hierarchical add)."""

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Return the ConfigLine whose text matches the last element of
        # *path* and whose ancestry matches the rest; None when absent.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of ConfigLine objects as raw text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Text of the section at *path*; note the quirk that a missing
        # section yields an empty list rather than an empty string.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Accept a single string or a list path; raises ValueError when
        # the section does not exist.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Adds one or lines of configuration
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            # Walk (and create where needed) each ancestor section.
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the platform module object.

    Prefers the legacy get_module() factory when it is in scope and
    falls back to NetworkModule otherwise.
    """
    try:
        factory = get_module
    except NameError:
        factory = NetworkModule
    return factory(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device configuration wrapped in CustomNetworkConfig.

    An explicit 'config' module parameter wins; otherwise the running
    config is fetched through whichever API the module object exposes.
    """
    contents = module.params['config']
    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            contents = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=contents)
+
def load_config(module, candidate):
    """Diff *candidate* against the running config and push the delta.

    Returns a result dict with 'changed' and, when commands were needed,
    'updates'.  Commands are only sent outside check mode; saving the
    config is controlled by the 'save' module parameter.
    """
    running = get_config(module)
    updates = [str(c).strip() for c in candidate.difference(running)]

    save_config = module.params['save']
    result = dict(changed=False)

    if not updates:
        return result

    if not module.check_mode:
        try:
            module.configure(updates)
        except AttributeError:
            module.config(updates)

    if save_config:
        try:
            module.config.save_config()
        except AttributeError:
            module.execute(['copy running-config startup-config'])

    result['changed'] = True
    result['updates'] = updates
    return result
+# END OF COMMON CODE
+
# Maps module argument names to the NX-OS CLI keywords they configure.
PARAM_TO_COMMAND_KEYMAP = {
    'vni': 'vni',
    'route_target_both': 'route-target both',
    'route_target_import': 'route-target import',
    'route_target_export': 'route-target export',
    'route_distinguisher': 'rd'
}
# Warning strings collected during a run; surfaced in the module result.
WARNINGS = []

# NOTE(review): mid-file import, used for the settle delay after removing
# an RD in main(); conventionally this belongs at the top of the file.
import time
+
def invoke(name, *args, **kwargs):
    """Dispatch to the module-level function called *name*.

    Returns the function's result, or None when no such global exists.
    """
    target = globals().get(name)
    if not target:
        return None
    return target(*args, **kwargs)
+
+
def get_value(arg, config, module):
    """Extract the single-line value configured for *arg* from *config*.

    Returns the text following the CLI keyword, or '' when the keyword
    is absent from the section text.
    """
    # Restore the named capture group lost in migration: '(?P.*)' is not a
    # valid pattern, and .group('value') below requires '(?P<value>...)'.
    REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
    value = ''
    if PARAM_TO_COMMAND_KEYMAP[arg] in config:
        value = REGEX.search(config).group('value')
    return value
+
+
def get_route_target_value(arg, config, module):
    """Collect every configured value for a route-target style *arg*.

    Scans *config* line by line and returns a list with the text found
    after the CLI keyword, one entry per matching line.
    """
    splitted_config = config.splitlines()
    value_list = []
    # Restore the named capture group lost in migration; .group('value')
    # below requires '(?P<value>...)'.
    REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)

    for line in splitted_config:
        if PARAM_TO_COMMAND_KEYMAP[arg] in line.strip():
            value_list.append(REGEX.search(line).group('value'))
    return value_list
+
+
def get_existing(module, args):
    """Build a dict of the device's current EVPN VNI settings.

    Looks up the 'evpn / vni <vni> l2' section of the running config and
    extracts each requested arg.  Returns an empty dict when the section
    is absent or holds no values.
    """
    existing = {}
    netcfg = get_config(module)
    parents = ['evpn', 'vni {0} l2'.format(module.params['vni'])]
    config = netcfg.get_section(parents)

    if config:
        for arg in args:
            if arg != 'vni':
                if arg == 'route_distinguisher':
                    # rd is a single-value setting.
                    existing[arg] = get_value(arg, config, module)
                else:
                    # route-target settings may occur multiple times.
                    existing[arg] = get_route_target_value(arg, config, module)

        # Only report the vni itself when at least one sub-setting has a
        # value; otherwise collapse to an empty dict.
        existing_fix = dict((k, v) for k, v in existing.iteritems() if v)
        if existing_fix:
            existing['vni'] = module.params['vni']
        else:
            existing = existing_fix

    return existing
+
+
def apply_key_map(key_map, table):
    """Translate the keys of *table* using *key_map*.

    Keys with no (or a falsy) mapping in key_map are dropped; values are
    carried over unchanged, falsy values included.
    """
    new_dict = {}
    for key, value in table.items():
        new_key = key_map.get(key)
        if new_key:
            # The original had identical if/else branches here (and re-read
            # table.get(key)); one assignment preserves the exact behavior.
            new_dict[new_key] = value
    return new_dict
+
+
def state_present(module, existing, proposed):
    """Compute the CLI commands that move *existing* to *proposed* for
    state=present.

    Returns (commands, parents); parents is the 'evpn / vni ... l2'
    section path when any command is required, else an empty list.
    """
    commands = list()
    parents = list()
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)

    for key, value in proposed_commands.iteritems():
        if key.startswith('route-target'):
            if value == ['default']:
                # 'default' removes every currently configured target.
                existing_value = existing_commands.get(key)

                if existing_value:
                    for target in existing_value:
                        commands.append('no {0} {1}'.format(key, target))
            else:
                if not isinstance(value, list):
                    value = [value]
                for target in value:
                    if existing:
                        # Add only targets not already present; translate the
                        # CLI keyword back to the module arg name for lookup.
                        if target not in existing.get(key.replace('-', '_').replace(' ', '_')):
                            commands.append('{0} {1}'.format(key, target))
                    else:
                        commands.append('{0} {1}'.format(key, target))
        else:
            if value == 'default':
                # Single-value setting (e.g. rd): 'default' unconfigures it.
                existing_value = existing_commands.get(key)
                if existing_value:
                    commands.append('no {0} {1}'.format(key, existing_value))
            else:
                command = '{0} {1}'.format(key, value)
                commands.append(command)

    if commands:
        parents = ['evpn', 'vni {0} l2'.format(module.params['vni'])]

    return commands, parents
+
+
+def state_absent(module, existing, proposed):
+ commands = ['no vni {0} l2'.format(module.params['vni'])]
+ parents = ['evpn']
+ return commands, parents
+
+
def execute_config(module, candidate):
    """Push *candidate* to the device, failing the module on shell errors."""
    result = {}
    try:
        result.update(load_config(module, candidate))
    except ShellError:
        module.fail_json(msg=str(get_exception()))
    return result
+
+
def main():
    """Module entry point: diff proposed vs existing EVPN VNI settings,
    apply the delta, and report results."""
    argument_spec = dict(
        vni=dict(required=True, type='str'),
        route_distinguisher=dict(required=False, type='str'),
        route_target_both=dict(required=False, type='list'),
        route_target_import=dict(required=False, type='list'),
        route_target_export=dict(required=False, type='list'),
        state=dict(choices=['present', 'absent'], default='present',
                   required=False),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    state = module.params['state']
    args = [
        'vni',
        'route_distinguisher',
        'route_target_both',
        'route_target_import',
        'route_target_export'
    ]

    existing = invoke('get_existing', module, args)
    end_state = existing
    proposed_args = dict((k, v) for k, v in module.params.iteritems()
                         if v is not None and k in args)

    proposed = {}
    for key, value in proposed_args.iteritems():
        if key != 'vni':
            # Normalize string booleans coming from playbooks.
            if value == 'true':
                value = True
            elif value == 'false':
                value = False
            if existing.get(key) or (not existing.get(key) and value):
                proposed[key] = value
    result = {}
    if state == 'present' or (state == 'absent' and existing):
        candidate = CustomNetworkConfig(indent=3)
        commands, parents = invoke('state_%s' % state, module, existing,
                                   proposed)
        if commands:
            # RD cannot be overridden in place: remove the old RD first,
            # warn the user, then pause before reapplying.
            if (existing.get('route_distinguisher') and
                    proposed.get('route_distinguisher')):
                if (existing['route_distinguisher'] != proposed[
                        'route_distinguisher'] and
                        proposed['route_distinguisher'] != 'default'):
                    WARNINGS.append('EVPN RD {0} was automatically removed. '
                                    'It is highly recommended to use a task '
                                    '(with default as value) to explicitly '
                                    'unconfigure it.'.format(
                                        existing['route_distinguisher']))
                    remove_commands = ['no rd {0}'.format(
                        existing['route_distinguisher'])]

                    candidate.add(remove_commands, parents=parents)
                    result = execute_config(module, candidate)
                    # NOTE(review): 30s pause before reapplying the RD;
                    # presumably needed for NX-OS to settle — confirm
                    # before tuning.
                    time.sleep(30)

            candidate = CustomNetworkConfig(indent=3)
            candidate.add(commands, parents=parents)
            result = execute_config(module, candidate)
        else:
            result['updates'] = []

    result['connected'] = module.connected
    # Verbose runs (-v and up) also report before/after state.
    if module._verbosity > 0:
        end_state = invoke('get_existing', module, args)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed_args

    if WARNINGS:
        result['warnings'] = WARNINGS

    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/network/nxos/nxos_facts.py b/network/nxos/nxos_facts.py
index af8df350710..f5ec1238ee3 100644
--- a/network/nxos/nxos_facts.py
+++ b/network/nxos/nxos_facts.py
@@ -16,6 +16,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: nxos_facts
@@ -100,7 +104,7 @@
# hardware
ansible_net_filesystems:
- description: All file system names availabe on the device
+ description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_memfree_mb:
@@ -172,9 +176,11 @@
"""
import re
+import ansible.module_utils.nxos
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcli import CommandRunner, AddCommandError
-from ansible.module_utils.nxos import NetworkModule, NetworkError
+from ansible.module_utils.network import NetworkModule, NetworkError
+from ansible.module_utils.six import iteritems
def add_command(runner, command, output=None):
@@ -193,6 +199,9 @@ def __init__(self, module, runner):
self.facts = dict()
self.commands()
+ def commands(self):
+ raise NotImplementedError
+
def transform_dict(self, data, keymap):
transform = dict()
for key, fact in keymap:
@@ -255,7 +264,7 @@ class Interfaces(FactsBase):
('state', 'state'),
('desc', 'description'),
('eth_bw', 'bandwidth'),
- ('eth_duplex','duplex'),
+ ('eth_duplex', 'duplex'),
('eth_speed', 'speed'),
('eth_mode', 'mode'),
('eth_hw_addr', 'macaddress'),
@@ -276,11 +285,11 @@ class Interfaces(FactsBase):
def commands(self):
add_command(self.runner, 'show interface', output='json')
- resp = self.module.cli(['show ipv6 interface | wc lines'])
- if int(resp[0]) > 1:
+ try:
+ self.module.cli('show ipv6 interface', 'json')
add_command(self.runner, 'show ipv6 interface', output='json')
self.ipv6 = True
- else:
+ except NetworkError:
self.ipv6 = False
try:
@@ -511,7 +520,7 @@ def main():
module.exit_json(out=module.from_json(runner.items))
ansible_facts = dict()
- for key, value in facts.iteritems():
+ for key, value in iteritems(facts):
# this is to maintain capability with nxos_facts 2.1
if key.startswith('_'):
ansible_facts[key[1:]] = value
diff --git a/network/nxos/nxos_feature.py b/network/nxos/nxos_feature.py
index b8625707cea..2a532dd5994 100644
--- a/network/nxos/nxos_feature.py
+++ b/network/nxos/nxos_feature.py
@@ -16,13 +16,17 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: nxos_feature
version_added: "2.1"
-short_description: Manage features in NX-OS switches
+short_description: Manage features in NX-OS switches.
description:
- - Offers ability to enable and disable features in NX-OS
+ - Offers ability to enable and disable features in NX-OS.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
@@ -41,12 +45,24 @@
'''
EXAMPLES = '''
-# Ensure lacp is enabled
-- nxos_feature: feature=lacp state=enabled host={{ inventory_hostname }}
-# Ensure ospf is disabled
-- nxos_feature: feature=ospf state=disabled host={{ inventory_hostname }}
-# Ensure vpc is enabled
-- nxos_feature: feature=vpc state=enabled host={{ inventory_hostname }}
+- name: Ensure lacp is enabled
+ nxos_feature:
+ feature: lacp
+ state: enabled
+ host: "{{ inventory_hostname }}"
+
+- name: Ensure ospf is disabled
+ nxos_feature:
+ feature: ospf
+ state: disabled
+ host: "{{ inventory_hostname }}"
+
+- name: Ensure vpc is enabled
+ nxos_feature:
+ feature: vpc
+ state: enabled
+ host: "{{ inventory_hostname }}"
+
'''
RETURN = '''
@@ -65,11 +81,6 @@
returned: always
type: dict
sample: {"state": "disabled"}
-state:
- description: state as sent in from the playbook
- returned: always
- type: string
- sample: "disabled"
updates:
description: commands sent to the device
returned: always
@@ -87,6 +98,163 @@
sample: "vpc"
'''
+import json
+import collections
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Normalize *val* to a list.

    Lists/tuples are copied, None becomes an empty list, and any other
    value is wrapped in a single-element list.
    """
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass with section-aware helpers used by the
    migrated NX-OS modules (section lookup, expansion, hierarchical add)."""

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Return the ConfigLine whose text matches the last element of
        # *path* and whose ancestry matches the rest; None when absent.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of ConfigLine objects as raw text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Text of the section at *path*; note the quirk that a missing
        # section yields an empty list rather than an empty string.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Accept a single string or a list path; raises ValueError when
        # the section does not exist.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Adds one or lines of configuration
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            # Walk (and create where needed) each ancestor section.
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the platform module object.

    Prefers the legacy get_module() factory when it is in scope and
    falls back to NetworkModule otherwise.
    """
    try:
        factory = get_module
    except NameError:
        factory = NetworkModule
    return factory(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device configuration wrapped in CustomNetworkConfig.

    An explicit 'config' module parameter wins; otherwise the running
    config is fetched through whichever API the module object exposes.
    """
    contents = module.params['config']
    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            contents = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=contents)
+
def load_config(module, candidate):
    """Diff *candidate* against the running config and push the delta.

    Returns a result dict with 'changed' and, when commands were needed,
    'updates'.  Commands are only sent outside check mode; saving the
    config is controlled by the 'save' module parameter.
    """
    running = get_config(module)
    updates = [str(c).strip() for c in candidate.difference(running)]

    save_config = module.params['save']
    result = dict(changed=False)

    if not updates:
        return result

    if not module.check_mode:
        try:
            module.configure(updates)
        except AttributeError:
            module.config(updates)

    if save_config:
        try:
            module.config.save_config()
        except AttributeError:
            module.execute(['copy running-config startup-config'])

    result['changed'] = True
    result['updates'] = updates
    return result
+# END OF COMMON CODE
+
def execute_config_command(commands, module):
try:
@@ -95,6 +263,15 @@ def execute_config_command(commands, module):
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
def get_cli_body_ssh(command, response, module):
@@ -116,6 +293,11 @@ def get_cli_body_ssh(command, response, module):
def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
@@ -125,6 +307,19 @@ def execute_show(cmds, module, command_type=None):
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
return response
@@ -156,34 +351,39 @@ def apply_key_map(key_map, table):
def get_available_features(feature, module):
available_features = {}
+ feature_regex = '(?P<feature>\S+)\s+\d+\s+(?P<state>.*)'
command = 'show feature'
- body = execute_show_command(command, module)
-
- try:
- body = body[0]['TABLE_cfcFeatureCtrlTable']['ROW_cfcFeatureCtrlTable']
- except (TypeError, IndexError):
- return available_features
-
- for each_feature in body:
- feature = each_feature['cfcFeatureCtrlName2']
- state = each_feature['cfcFeatureCtrlOpStatus2']
- if 'enabled' in state:
- state = 'enabled'
+ body = execute_show_command(command, module, command_type='cli_show_ascii')
+ split_body = body[0].splitlines()
- if feature not in available_features.keys():
- available_features[feature] = state
- else:
- if (available_features[feature] == 'disabled' and
- state == 'enabled'):
+ for line in split_body:
+ try:
+ match_feature = re.match(feature_regex, line, re.DOTALL)
+ feature_group = match_feature.groupdict()
+ feature = feature_group['feature']
+ state = feature_group['state']
+ except AttributeError:
+ feature = ''
+ state = ''
+
+ if feature and state:
+ if 'enabled' in state:
+ state = 'enabled'
+
+ if feature not in available_features:
available_features[feature] = state
+ else:
+ if (available_features[feature] == 'disabled' and
+ state == 'enabled'):
+ available_features[feature] = state
return available_features
+
def get_commands(proposed, existing, state, module):
feature = validate_feature(module, mode='config')
-
commands = []
feature_check = proposed == existing
if not feature_check:
@@ -205,10 +405,36 @@ def validate_feature(module, mode='show'):
feature_to_be_mapped = {
'show': {
- 'nv overlay': 'nve'},
+ 'nv overlay': 'nve',
+ 'vn-segment-vlan-based': 'vnseg_vlan',
+ 'hsrp': 'hsrp_engine',
+ 'fabric multicast': 'fabric_mcast',
+ 'scp-server': 'scpServer',
+ 'sftp-server': 'sftpServer',
+ 'sla responder': 'sla_responder',
+ 'sla sender': 'sla_sender',
+ 'ssh': 'sshServer',
+ 'tacacs+': 'tacacs',
+ 'telnet': 'telnetServer',
+ 'ethernet-link-oam': 'elo',
+ 'port-security': 'eth_port_sec'
+ },
'config':
{
- 'nve': 'nv overlay'}
+ 'nve': 'nv overlay',
+ 'vnseg_vlan': 'vn-segment-vlan-based',
+ 'hsrp_engine': 'hsrp',
+ 'fabric_mcast': 'fabric multicast',
+ 'scpServer': 'scp-server',
+ 'sftpServer': 'sftp-server',
+ 'sla_sender': 'sla sender',
+ 'sla_responder': 'sla responder',
+ 'sshServer': 'ssh',
+ 'tacacs': 'tacacs+',
+ 'telnetServer': 'telnet',
+ 'elo': 'ethernet-link-oam',
+ 'eth_port_sec': 'port-security'
+ }
}
if feature in feature_to_be_mapped[mode]:
@@ -222,15 +448,18 @@ def main():
feature=dict(type='str', required=True),
state=dict(choices=['enabled', 'disabled'], default='enabled',
required=False),
+ include_defaults=dict(default=False),
+ config=dict(),
+ save=dict(type='bool', default=False)
)
- module = get_module(argument_spec=argument_spec,
- supports_check_mode=True)
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
feature = validate_feature(module)
state = module.params['state'].lower()
available_features = get_available_features(feature, module)
- if feature not in available_features.keys():
+ if feature not in available_features:
module.fail_json(
msg='Invalid feature name.',
features_currently_supported=available_features,
@@ -254,12 +483,13 @@ def main():
updated_features = get_available_features(feature, module)
existstate = updated_features[feature]
end_state = dict(state=existstate)
+ if 'configure' in cmds:
+ cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
- results['state'] = state
results['updates'] = cmds
results['changed'] = changed
results['feature'] = module.params['feature']
@@ -267,10 +497,5 @@ def main():
module.exit_json(**results)
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.netcfg import *
-from ansible.module_utils.nxos import *
if __name__ == '__main__':
main()
diff --git a/network/nxos/nxos_file_copy.py b/network/nxos/nxos_file_copy.py
new file mode 100644
index 00000000000..0885c4575e2
--- /dev/null
+++ b/network/nxos/nxos_file_copy.py
@@ -0,0 +1,416 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_file_copy
+version_added: "2.2"
+short_description: Copy a file to a remote NXOS device over SCP.
+description:
+ - Copy a file to the flash (or bootflash) remote network device
+ on NXOS devices.
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - The feature must be enabled with feature scp-server.
+ - If the file is already present (md5 sums match), no transfer will
+ take place.
+ - Check mode will tell you if the file would be copied.
+options:
+ local_file:
+ description:
+ - Path to local file. Local directory must exist.
+ required: true
+ remote_file:
+ description:
+ - Remote file path of the copy. Remote directories must exist.
+ If omitted, the name of the local file will be used.
+ required: false
+ default: null
+ file_system:
+ description:
+ - The remote file system of the device. If omitted,
+ devices that support a file_system parameter will use
+ their default values.
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+- nxos_file_copy:
+ local_file: "./test_file.txt"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+transfer_status:
+ description: Whether a file was transferred. "No Transfer" or "Sent".
+ returned: success
+ type: string
+ sample: 'Sent'
+local_file:
+ description: The path of the local file.
+ returned: success
+ type: string
+ sample: '/path/to/local/file'
+remote_file:
+ description: The path of the remote file.
+ returned: success
+ type: string
+ sample: '/path/to/remote/file'
+'''
+
+
+import os
+from scp import SCPClient
+import paramiko
+import time
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Normalize *val* to a list.

    Lists/tuples are copied, None becomes an empty list, and any other
    value is wrapped in a single-element list.
    """
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass with section-aware helpers used by the
    migrated NX-OS modules (section lookup, expansion, hierarchical add)."""

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Return the ConfigLine whose text matches the last element of
        # *path* and whose ancestry matches the rest; None when absent.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of ConfigLine objects as raw text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Text of the section at *path*; note the quirk that a missing
        # section yields an empty list rather than an empty string.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Accept a single string or a list path; raises ValueError when
        # the section does not exist.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Adds one or lines of configuration
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            # Walk (and create where needed) each ancestor section.
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the platform module object.

    Prefers the legacy get_module() factory when it is in scope and
    falls back to NetworkModule otherwise.
    """
    try:
        factory = get_module
    except NameError:
        factory = NetworkModule
    return factory(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device configuration wrapped in CustomNetworkConfig.

    An explicit 'config' module parameter wins; otherwise the running
    config is fetched through whichever API the module object exposes.
    """
    contents = module.params['config']
    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            contents = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=contents)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+ if module.params['transport'] == 'cli':
+ cmds = [command]
+ body = execute_show(cmds, module)
+ elif module.params['transport'] == 'nxapi':
+ cmds = [command]
+ body = execute_show(cmds, module, command_type=command_type)
+
+ return body
+
+
+def remote_file_exists(module, dst, file_system='bootflash:'):
+ command = 'dir {0}/{1}'.format(file_system, dst)
+ body = execute_show_command(command, module, command_type='cli_show_ascii')
+ if 'No such file' in body[0]:
+ return False
+ return True
+
+
+def verify_remote_file_exists(module, dst, file_system='bootflash:'):
+ command = 'dir {0}/{1}'.format(file_system, dst)
+ body = execute_show_command(command, module, command_type='cli_show_ascii')
+ if 'No such file' in body[0]:
+ return 0
+ return body[0].split()[0].strip()
+
+
+def local_file_exists(module):
+ return os.path.isfile(module.params['local_file'])
+
+
+def get_flash_size(module):
+ command = 'dir {}'.format(module.params['file_system'])
+ body = execute_show_command(command, module, command_type='cli_show_ascii')
+
+ match = re.search(r'(\d+) bytes free', body[0])
+ bytes_free = match.group(1)
+
+ return int(bytes_free)
+
+
+def enough_space(module):
+ flash_size = get_flash_size(module)
+ file_size = os.path.getsize(module.params['local_file'])
+ if file_size > flash_size:
+ return False
+
+ return True
+
+
+def transfer_file(module, dest):
+ file_size = os.path.getsize(module.params['local_file'])
+
+ if not local_file_exists(module):
+ module.fail_json(msg='Could not transfer file. Local file doesn\'t exist.')
+
+ if not enough_space(module):
+ module.fail_json(msg='Could not transfer file. Not enough space on device.')
+
+ hostname = module.params['host']
+ username = module.params['username']
+ password = module.params['password']
+
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ ssh.connect(
+ hostname=hostname,
+ username=username,
+ password=password)
+
+ full_remote_path = '{}{}'.format(module.params['file_system'], dest)
+ scp = SCPClient(ssh.get_transport())
+ try:
+ scp.put(module.params['local_file'], full_remote_path)
+ except:
+ time.sleep(10)
+ temp_size = verify_remote_file_exists(
+ module, dest, file_system=module.params['file_system'])
+ if int(temp_size) == int(file_size):
+ pass
+ else:
+ module.fail_json(msg='Could not transfer file. There was an error '
+ 'during transfer. Please make sure remote '
+ 'permissions are set.', temp_size=temp_size,
+ file_size=file_size)
+ scp.close()
+ return True
+
+
+def main():
+ argument_spec = dict(
+ local_file=dict(required=True),
+ remote_file=dict(required=False),
+ file_system=dict(required=False, default='bootflash:'),
+ include_defaults=dict(default=True),
+ config=dict(),
+ save=dict(type='bool', default=False)
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ local_file = module.params['local_file']
+ remote_file = module.params['remote_file']
+ file_system = module.params['file_system']
+
+ changed = False
+ transfer_status = 'No Transfer'
+
+ if not os.path.isfile(local_file):
+ module.fail_json(msg="Local file {} not found".format(local_file))
+
+ dest = remote_file or os.path.basename(local_file)
+ remote_exists = remote_file_exists(module, dest, file_system=file_system)
+
+ if not remote_exists:
+ changed = True
+ file_exists = False
+ else:
+ file_exists = True
+
+ if not module.check_mode and not file_exists:
+ try:
+ transfer_file(module, dest)
+ transfer_status = 'Sent'
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg=str(clie))
+
+ if remote_file is None:
+ remote_file = os.path.basename(local_file)
+
+ module.exit_json(changed=changed,
+ transfer_status=transfer_status,
+ local_file=local_file,
+ remote_file=remote_file,
+ file_system=file_system)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_gir.py b/network/nxos/nxos_gir.py
new file mode 100644
index 00000000000..f72f7d79a07
--- /dev/null
+++ b/network/nxos/nxos_gir.py
@@ -0,0 +1,511 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_gir
+version_added: "2.2"
+short_description: Trigger a graceful removal or insertion (GIR) of the switch.
+description:
+ - Trigger a graceful removal or insertion (GIR) of the switch.
+extends_documentation_fragment: nxos
+author:
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - C(state) has effect only in combination with
+ C(system_mode_maintenance_timeout) or
+ C(system_mode_maintenance_on_reload_reset_reason).
+ - Using C(system_mode_maintenance) and
+ C(system_mode_maintenance_dont_generate_profile) would make the module
+ fail, but the system mode will be triggered anyway.
+options:
+ system_mode_maintenance:
+ description:
+ - When C(system_mode_maintenance=true) it puts all enabled
+ protocols in maintenance mode (using the isolate command).
+ When C(system_mode_maintenance=false) it puts all enabled
+ protocols in normal mode (using the no isolate command).
+ required: false
+ default: null
+ choices: ['true','false']
+ system_mode_maintenance_dont_generate_profile:
+ description:
+ - When C(system_mode_maintenance_dont_generate_profile=true) it
+ prevents the dynamic searching of enabled protocols and executes
+ commands configured in a maintenance-mode profile.
+ Use this option if you want the system to use a maintenance-mode
+ profile that you have created.
+ When C(system_mode_maintenance_dont_generate_profile=false) it
+ prevents the dynamic searching of enabled protocols and executes
+ commands configured in a normal-mode profile. Use this option if
+ you want the system to use a normal-mode profile that
+ you have created.
+ required: false
+ default: null
+ choices: ['true','false']
+ system_mode_maintenance_timeout:
+ description:
+ - Keeps the switch in maintenance mode for a specified
+ number of minutes. Range is 5-65535.
+ required: false
+ default: null
+ system_mode_maintenance_shutdown:
+ description:
+ - Shuts down all protocols, vPC domains, and interfaces except
+ the management interface (using the shutdown command).
+ This option is disruptive while C(system_mode_maintenance)
+ (which uses the isolate command) is not.
+ required: false
+ default: null
+ choices: ['true','false']
+ system_mode_maintenance_on_reload_reset_reason:
+ description:
+ - Boots the switch into maintenance mode automatically in the
+ event of a specified system crash.
+ required: false
+ default: null
+ choices: ['hw_error','svc_failure','kern_failure','wdog_timeout',
+ 'fatal_error','lc_failure','match_any','manual_reload']
+ state:
+ description:
+ - Specify desired state of the resource.
+ required: true
+ default: present
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+# Trigger system maintenance mode
+- nxos_gir:
+ system_mode_maintenance: true
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+# Trigger system normal mode
+- nxos_gir:
+ system_mode_maintenance: false
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+# Configure on-reload reset-reason for maintenance mode
+- nxos_gir:
+ system_mode_maintenance_on_reload_reset_reason: manual_reload
+ state: present
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+# Add on-reload reset-reason for maintenance mode
+- nxos_gir:
+ system_mode_maintenance_on_reload_reset_reason: hw_error
+ state: present
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+# Remove on-reload reset-reason for maintenance mode
+- nxos_gir:
+ system_mode_maintenance_on_reload_reset_reason: manual_reload
+ state: absent
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+# Set timeout for maintenance mode
+- nxos_gir:
+ system_mode_maintenance_timeout: 30
+ state: present
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+# Remove timeout for maintenance mode
+- nxos_gir:
+ system_mode_maintenance_timeout: 30
+ state: absent
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+final_system_mode:
+ description: describe the last system mode
+ returned: verbose mode
+ type: string
+ sample: normal
+updates:
+ description: commands sent to the device
+ returned: verbose mode
+ type: list
+ sample: ["terminal dont-ask", "system mode maintenance timeout 10"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
+def execute_show_command(command, module, command_type='cli_show_ascii'):
+ cmds = [command]
+ if module.params['transport'] == 'cli':
+ body = execute_show(cmds, module)
+ elif module.params['transport'] == 'nxapi':
+ body = execute_show(cmds, module, command_type=command_type)
+
+ return body
+
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def get_system_mode(module):
+ command = 'show system mode'
+ body = execute_show_command(command, module)[0]
+ if 'normal' in body.lower():
+ mode = 'normal'
+ else:
+ mode = 'maintenance'
+ return mode
+
+
+def get_maintenance_timeout(module):
+ command = 'show maintenance timeout'
+ body = execute_show_command(command, module)[0]
+ timeout = body.split()[4]
+ return timeout
+
+
+def get_reset_reasons(module):
+ command = 'show maintenance on-reload reset-reasons'
+ body = execute_show_command(command, module)[0]
+ return body
+
+
+def get_commands(module, state, mode):
+ commands = list()
+ system_mode = ''
+ if module.params['system_mode_maintenance'] is True and mode == 'normal':
+ commands.append('system mode maintenance')
+ elif (module.params['system_mode_maintenance'] is False and
+ mode == 'maintenance'):
+ commands.append('no system mode maintenance')
+
+ elif (module.params[
+ 'system_mode_maintenance_dont_generate_profile'] is True and
+ mode == 'normal'):
+ commands.append('system mode maintenance dont-generate-profile')
+ elif (module.params[
+ 'system_mode_maintenance_dont_generate_profile'] is False and
+ mode == 'maintenance'):
+ commands.append('no system mode maintenance dont-generate-profile')
+
+ elif module.params['system_mode_maintenance_timeout']:
+ timeout = get_maintenance_timeout(module)
+ if (state == 'present' and
+ timeout != module.params['system_mode_maintenance_timeout']):
+ commands.append('system mode maintenance timeout {0}'.format(
+ module.params['system_mode_maintenance_timeout']))
+ elif (state == 'absent' and
+ timeout == module.params['system_mode_maintenance_timeout']):
+ commands.append('no system mode maintenance timeout {0}'.format(
+ module.params['system_mode_maintenance_timeout']))
+
+ elif module.params['system_mode_maintenance_shutdown'] is True:
+ commands.append('system mode maintenance shutdown')
+
+ elif module.params['system_mode_maintenance_on_reload_reset_reason']:
+ reset_reasons = get_reset_reasons(module)
+ if (state == 'present' and
+ module.params[
+ 'system_mode_maintenance_on_reload_reset_reason'].lower() not
+ in reset_reasons.lower()):
+ commands.append('system mode maintenance on-reload '
+ 'reset-reason {0}'.format(
+ module.params[
+ 'system_mode_maintenance_on_reload_reset_reason']))
+ elif (state == 'absent' and
+ module.params[
+ 'system_mode_maintenance_on_reload_reset_reason'].lower() in
+ reset_reasons.lower()):
+ commands.append('no system mode maintenance on-reload '
+ 'reset-reason {0}'.format(
+ module.params[
+ 'system_mode_maintenance_on_reload_reset_reason']))
+
+ if commands:
+ commands.insert(0, 'terminal dont-ask')
+ return commands
+
+
+def main():
+ argument_spec = dict(
+ system_mode_maintenance=dict(required=False, type='bool'),
+ system_mode_maintenance_dont_generate_profile=dict(required=False,
+ type='bool'),
+ system_mode_maintenance_timeout=dict(required=False, type='str'),
+ system_mode_maintenance_shutdown=dict(required=False, type='bool'),
+ system_mode_maintenance_on_reload_reset_reason=dict(required=False,
+ choices=['hw_error','svc_failure','kern_failure',
+ 'wdog_timeout','fatal_error','lc_failure',
+ 'match_any','manual_reload']),
+ state=dict(choices=['absent', 'present', 'default'],
+ default='present', required=False)
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ mutually_exclusive=[[
+ 'system_mode_maintenance',
+ 'system_mode_maintenance_dont_generate_profile',
+ 'system_mode_maintenance_timeout',
+ 'system_mode_maintenance_shutdown',
+ 'system_mode_maintenance_on_reload_reset_reason'
+ ]],
+ required_one_of=[[
+ 'system_mode_maintenance',
+ 'system_mode_maintenance_dont_generate_profile',
+ 'system_mode_maintenance_timeout',
+ 'system_mode_maintenance_shutdown',
+ 'system_mode_maintenance_on_reload_reset_reason'
+ ]],
+ supports_check_mode=True)
+
+ state = module.params['state']
+ mode = get_system_mode(module)
+ commands = get_commands(module, state, mode)
+ changed = False
+ if commands:
+ if module.check_mode:
+ module.exit_json(changed=True, commands=commands)
+ else:
+ execute_config_command(commands, module)
+ changed = True
+
+ result = {}
+ result['connected'] = module.connected
+ result['changed'] = changed
+ if module._verbosity > 0:
+ final_system_mode = get_system_mode(module)
+ result['final_system_mode'] = final_system_mode
+ result['updates'] = commands
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/network/nxos/nxos_gir_profile_management.py b/network/nxos/nxos_gir_profile_management.py
new file mode 100644
index 00000000000..99c29d6253d
--- /dev/null
+++ b/network/nxos/nxos_gir_profile_management.py
@@ -0,0 +1,383 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_gir_profile_management
+version_added: "2.2"
+short_description: Create a maintenance-mode or normal-mode profile for GIR.
+description:
+ - Manage a maintenance-mode or normal-mode profile with configuration
+ commands that can be applied during graceful removal
+ or graceful insertion.
+extends_documentation_fragment: nxos
+author:
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - This module is not idempotent when C(state=present).
+ - C(state=absent) removes the whole profile.
+options:
+ commands:
+ description:
+ - List of commands to be included into the profile.
+ required: false
+ default: null
+ mode:
+ description:
+ - Configure the profile as Maintenance or Normal mode.
+ required: true
+ choices: ['maintenance', 'normal']
+ state:
+ description:
+ - Specify desired state of the resource.
+ required: false
+ default: present
+ choices: ['present','absent']
+ include_defaults:
+ description:
+ - Specify to retrieve or not the complete running configuration
+ for module operations.
+ required: false
+ default: false
+ choices: ['true','false']
+ config:
+ description:
+ - Specify the configuration string to be used for module operations.
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+# Create a maintenance-mode profile
+- nxos_gir_profile_management:
+ mode: maintenance
+ commands:
+ - router eigrp 11
+ - isolate
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+# Remove the maintenance-mode profile
+- nxos_gir_profile_management:
+ mode: maintenance
+ state: absent
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: list of commands passed into module.
+ returned: verbose mode
+ type: list
+ sample: ["router eigrp 11", "isolate"]
+existing:
+ description: list of existing profile commands.
+ returned: verbose mode
+ type: list
+ sample: ["router bgp 65535","isolate","router eigrp 10","isolate",
+ "diagnostic bootup level complete"]
+end_state:
+ description: list of profile entries after module execution.
+ returned: verbose mode
+ type: list
+ sample: ["router bgp 65535","isolate","router eigrp 10","isolate",
+ "diagnostic bootup level complete","router eigrp 11", "isolate"]
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["configure maintenance profile maintenance-mode",
+ "router eigrp 11","isolate"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+def get_existing(module):
+ existing = []
+ netcfg = get_config(module)
+
+ if module.params['mode'] == 'maintenance':
+ parents = ['configure maintenance profile maintenance-mode']
+ else:
+ parents = ['configure maintenance profile normal-mode']
+
+ config = netcfg.get_section(parents)
+ if config:
+ existing = config.splitlines()
+ existing = [cmd.strip() for cmd in existing]
+ existing.pop(0)
+
+ return existing
+
+
+def state_present(module, existing, commands):
+ cmds = list()
+ cmds.extend(commands)
+ if module.params['mode'] == 'maintenance':
+ cmds.insert(0, 'configure maintenance profile maintenance-mode')
+ else:
+ cmds.insert(0, 'configure maintenance profile normal-mode')
+
+ return cmds
+
+
+def state_absent(module, existing, commands):
+ if module.params['mode'] == 'maintenance':
+ cmds = ['no configure maintenance profile maintenance-mode']
+ else:
+ cmds = ['no configure maintenance profile normal-mode']
+ return cmds
+
+
+def invoke(name, *args, **kwargs):
+ func = globals().get(name)
+ if func:
+ return func(*args, **kwargs)
+
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def main():
+ argument_spec = dict(
+ commands=dict(required=False, type='list'),
+ mode=dict(required=True, choices=['maintenance', 'normal']),
+ state=dict(choices=['absent', 'present'],
+ default='present'),
+ include_defaults=dict(default=False),
+ config=dict()
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ state = module.params['state']
+ commands = module.params['commands'] or []
+
+ if state == 'absent' and commands:
+ module.fail_json(msg='when state is absent, no command can be used.')
+
+ existing = invoke('get_existing', module)
+ end_state = existing
+ changed = False
+
+ result = {}
+ cmds = []
+ if state == 'present' or (state == 'absent' and existing):
+ cmds = invoke('state_%s' % state, module, existing, commands)
+
+ if module.check_mode:
+ module.exit_json(changed=True, commands=cmds)
+ else:
+ execute_config_command(cmds, module)
+ changed = True
+ end_state = invoke('get_existing', module)
+
+ result['connected'] = module.connected
+ result['changed'] = changed
+ if module._verbosity > 0:
+ end_state = invoke('get_existing', module)
+ result['end_state'] = end_state
+ result['existing'] = existing
+ result['proposed'] = commands
+ result['updates'] = cmds
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_hsrp.py b/network/nxos/nxos_hsrp.py
new file mode 100644
index 00000000000..9e9e7e3542a
--- /dev/null
+++ b/network/nxos/nxos_hsrp.py
@@ -0,0 +1,721 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_hsrp
+version_added: "2.2"
+short_description: Manages HSRP configuration on NX-OS switches.
+description:
+ - Manages HSRP configuration on NX-OS switches.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - HSRP feature needs to be enabled first on the system.
+ - SVIs must exist before using this module.
+ - Interface must be a L3 port before using this module.
+ - HSRP cannot be configured on loopback interfaces.
+  - MD5 authentication is only supported with HSRPv2; with HSRPv1 it is
+    silently ignored rather than raising an error. This module therefore
+    allows MD5 authentication only together with HSRPv2 to enforce better
+    practice.
+options:
+ group:
+ description:
+ - HSRP group number.
+ required: true
+ interface:
+ description:
+ - Full name of interface that is being managed for HSRP.
+ required: true
+ version:
+ description:
+ - HSRP version.
+ required: false
+ default: 2
+ choices: ['1','2']
+ priority:
+ description:
+ - HSRP priority.
+ required: false
+ default: null
+ vip:
+ description:
+ - HSRP virtual IP address.
+ required: false
+ default: null
+ auth_string:
+ description:
+ - Authentication string.
+ required: false
+ default: null
+ auth_type:
+ description:
+ - Authentication type.
+ required: false
+ default: null
+ choices: ['text','md5']
+ state:
+ description:
+ - Specify desired state of the resource.
+ required: false
+ choices: ['present','absent']
+ default: 'present'
+'''
+
+EXAMPLES = '''
+- name: Ensure HSRP is configured with following params on a SVI
+ nxos_hsrp:
+ group: 10
+ vip: 10.1.1.1
+ priority: 150
+ interface: vlan10
+ preempt: enabled
+ host: 68.170.147.165
+
+- name: Ensure HSRP is configured with following params on a SVI
+ nxos_hsrp:
+ group: 10
+ vip: 10.1.1.1
+ priority: 150
+ interface: vlan10
+ preempt: enabled
+ host: 68.170.147.165
+ auth_type: text
+ auth_string: CISCO
+
+- name: Remove HSRP config for given interface, group, and VIP
+ nxos_hsrp:
+ group: 10
+ interface: vlan10
+ vip: 10.1.1.1
+ host: 68.170.147.165
+ state: absent
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"group": "30", "version": "2", "vip": "10.30.1.1"}
+existing:
+    description: k/v pairs of existing hsrp info on the interface
+    returned: always
+    type: dict
+    sample: {}
+end_state:
+ description: k/v pairs of hsrp after module execution
+ returned: always
+ type: dict
+ sample: {"auth_string": "cisco", "auth_type": "text",
+ "group": "30", "interface": "vlan10", "preempt": "disabled",
+ "priority": "100", "version": "2", "vip": "10.30.1.1"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["interface vlan10", "hsrp version 2", "hsrp 30", "ip 10.30.1.1"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import json
+
+# COMMON CODE FOR MIGRATION
+
+import ansible.module_utils.nxos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+from ansible.module_utils.network import NetworkModule
+
+
def to_list(val):
    """Normalize *val* to a list: sequences are copied, None becomes an
    empty list, and any other value is wrapped in a one-element list."""
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass used by the migrated nxos modules.

    Adds section-aware helpers: expanding a hierarchical config section
    into its descendant lines, looking sections up by parent path, and
    adding indented child commands under parent commands.
    """

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all of its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Find the ConfigLine whose text and full parent chain match *path*;
        # returns None implicitly when no line matches.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of ConfigLine objects back into raw config text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Return the config block at *path* as text, or [] when absent.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Like get_section() but returns ConfigLine objects; raises
        # ValueError when the path does not exist in the config.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Adds one or lines of configuration
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the module object, preferring the legacy get_module() factory
    and falling back to NetworkModule when it is not defined."""
    try:
        factory = get_module
    except NameError:
        # get_module is not imported in this file, so this is the live path.
        factory = NetworkModule
    return factory(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device running-config wrapped in a CustomNetworkConfig.

    Uses the user-supplied 'config' param when given; otherwise pulls the
    config from the device, falling back to the newer module.config API
    when module.get_config() is not available.

    NOTE(review): the include_defaults *argument* is never read; the
    'include_defaults' module param is used instead — confirm intended.
    """
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
+
def load_config(module, candidate):
    """Diff *candidate* against the device config and apply the delta.

    Returns a result dict with 'changed' and, when commands were sent,
    'updates'. Honors check mode and the 'save' module param (copy
    running-config to startup-config).
    """
    config = get_config(module)

    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            try:
                module.configure(commands)
            except AttributeError:
                # transport object exposes config() instead of configure()
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
def execute_config_command(commands, module):
    """Send config commands to the device and return the raw CLI output.

    Falls back to the module.cli interface (prefixing 'configure') when
    module.configure() is not available; any shell error aborts the
    module via fail_json.
    """
    try:
        output = module.configure(commands)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(clie), commands=commands)
    except AttributeError:
        # Legacy transport object: queue commands on the cli interface.
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            output = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(clie), commands=commands)
    return output
+
+
def get_cli_body_ssh(command, response, module):
    """Normalize raw CLI (ssh) output into the structure NX-API returns.

    Not every command supports '| json' over ssh: an XML payload signals
    "valid command, resource absent" (empty body), 'show run' output is
    passed through untouched, and anything else must parse as JSON or
    the module fails.
    """
    raw = response[0]
    if 'xml' in raw:
        return []
    if 'show run' in command:
        return response
    try:
        cleaned = raw.replace(command + '\n\n', '').strip()
        return [json.loads(cleaned)]
    except ValueError:
        module.fail_json(msg='Command does not support JSON output',
                        command=command)
+
+
def execute_show(cmds, module, command_type=None):
    """Run show commands through the active transport and return the response.

    command_type ('cli_show' / 'cli_show_ascii') selects structured vs raw
    output; for the cli fallback path it is mapped to 'json' / 'text'.
    Shell errors abort the module via fail_json.
    """
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        # module.execute not available: use the cli command-queue interface.
        try:
            if command_type:
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                module.cli.add_commands(cmds, raw=True)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
+
+
def execute_show_command(command, module, command_type='cli_show'):
    """Run *command* and return its parsed body for cli or nxapi transport.

    Over cli/ssh, ' | json' is appended and the output normalized by
    get_cli_body_ssh().
    """
    if module.params['transport'] == 'cli':
        command += ' | json'
        cmds = [command]
        response = execute_show(cmds, module)
        body = get_cli_body_ssh(command, response, module)
    elif module.params['transport'] == 'nxapi':
        cmds = [command]
        body = execute_show(cmds, module, command_type=command_type)

    # NOTE(review): a transport other than cli/nxapi would leave 'body'
    # unbound here — presumably the shared arg spec restricts transport
    # to those two values; verify.
    return body
+
+
def apply_key_map(key_map, table):
    """Rename the keys of *table* via *key_map*, dropping unmapped keys.

    Truthy values are stringified; falsy values (0, '', None, ...) are
    kept as-is so callers can distinguish "absent" from "zero".
    """
    mapped = {}
    for key, value in table.items():
        target = key_map.get(key)
        if not target:
            continue
        mapped[target] = str(value) if value else value
    return mapped
+
+
def get_interface_type(interface):
    """Classify an interface name by its (case-insensitive) prefix.

    Returns one of: 'ethernet', 'svi', 'loopback', 'management',
    'portchannel', or 'unknown'.
    """
    prefix_map = (
        ('ET', 'ethernet'),
        ('VL', 'svi'),
        ('LO', 'loopback'),
        ('MG', 'management'),
        ('MA', 'management'),   # 'mgmt0' matches MG; 'management' matches MA
        ('PO', 'portchannel'),
    )
    name = interface.upper()
    for prefix, intf_type in prefix_map:
        if name.startswith(prefix):
            return intf_type
    return 'unknown'
+
+
def get_interface_mode(interface, intf_type, module):
    """Return 'layer2', 'layer3', or 'unknown' for *interface*.

    SVIs are layer3 by definition; ethernet/port-channel interfaces are
    inspected via 'show interface' (access/trunk modes count as layer2).
    """
    command = 'show interface {0}'.format(interface)
    # NOTE(review): rebinding the 'interface' parameter to {} looks
    # unintentional, but the original value is no longer needed here.
    interface = {}
    mode = 'unknown'

    if intf_type in ['ethernet', 'portchannel']:
        body = execute_show_command(command, module)[0]
        interface_table = body['TABLE_interface']['ROW_interface']
        # a missing eth_mode field is treated as a routed (layer3) port
        mode = str(interface_table.get('eth_mode', 'layer3'))
        if mode == 'access' or mode == 'trunk':
            mode = 'layer2'
    elif intf_type == 'svi':
        mode = 'layer3'
    return mode
+
+
def get_hsrp_groups_on_interfaces(device, module):
    """Map interface name -> list of HSRP group numbers via 'show hsrp all'.

    Returns {} when the device reports no HSRP groups.
    NOTE(review): the 'device' argument is never used — presumably a
    leftover from a pre-migration signature; confirm before removing.
    """
    command = 'show hsrp all'
    body = execute_show_command(command, module)
    hsrp = {}

    try:
        get_data = body[0]['TABLE_grp_detail']['ROW_grp_detail']
    except (KeyError, AttributeError):
        return {}

    for entry in get_data:
        interface = str(entry['sh_if_index'].lower())
        # 'new' is a sentinel meaning "interface not seen yet"
        value = hsrp.get(interface, 'new')
        if value == 'new':
            hsrp[interface] = []
        group = str(entry['sh_group_num'])
        hsrp[interface].append(group)

    return hsrp
+
+
def get_hsrp_group(group, interface, module):
    """Return the parsed HSRP settings for *group* on *interface*.

    Parses 'show hsrp group <n>' output through apply_key_map, normalizes
    the version field ('v1'/'v2' -> '1'/'2'), and returns {} when no
    matching entry exists.
    """
    command = 'show hsrp group {0}'.format(group)
    body = execute_show_command(command, module)
    hsrp = {}

    # device field name -> module param name
    hsrp_key = {
        'sh_if_index': 'interface',
        'sh_group_num': 'group',
        'sh_group_version': 'version',
        'sh_cfg_prio': 'priority',
        'sh_preempt': 'preempt',
        'sh_vip': 'vip',
        'sh_authentication_type': 'auth_type',
        'sh_authentication_data': 'auth_string'
    }

    try:
        hsrp_table = body[0]['TABLE_grp_detail']['ROW_grp_detail']
    except (AttributeError, IndexError, TypeError):
        return {}

    # a single entry comes back as a dict; normalize to a list
    if isinstance(hsrp_table, dict):
        hsrp_table = [hsrp_table]

    for hsrp_group in hsrp_table:
        parsed_hsrp = apply_key_map(hsrp_key, hsrp_group)

        parsed_hsrp['interface'] = parsed_hsrp['interface'].lower()

        if parsed_hsrp['version'] == 'v1':
            parsed_hsrp['version'] = '1'
        elif parsed_hsrp['version'] == 'v2':
            parsed_hsrp['version'] = '2'

        if parsed_hsrp['interface'] == interface:
            return parsed_hsrp

    # no entry matched this interface: hsrp is still the empty dict
    return hsrp
+
+
def get_commands_remove_hsrp(group, interface):
    """Return the CLI needed to delete HSRP *group* from *interface*."""
    return ['interface {0}'.format(interface),
            'no hsrp {0}'.format(group)]
+
+
def get_commands_config_hsrp(delta, interface, args):
    """Build the CLI command list that applies *delta* (proposed minus
    existing) to the HSRP group on *interface*.

    delta: dict of changed params. args: the full proposed params, used
    to fill in the group number (or the other half of the auth pair)
    when only part of a setting changed. Returns an ordered command list
    starting with the 'interface ...' line.
    """
    commands = []

    config_args = {
        'group': 'hsrp {group}',
        'priority': 'priority {priority}',
        'preempt': '{preempt}',
        'vip': 'ip {vip}'
    }

    # Translate the enabled/disabled flag into the actual CLI keyword.
    preempt = delta.get('preempt', None)
    group = delta.get('group', None)
    if preempt:
        if preempt == 'enabled':
            delta['preempt'] = 'preempt'
        elif preempt == 'disabled':
            delta['preempt'] = 'no preempt'

    # Fix: .items() instead of the Python-2-only .iteritems().
    for key, value in delta.items():
        command = config_args.get(key, 'DNE').format(**delta)
        if command and command != 'DNE':
            if key == 'group':
                # 'hsrp <group>' must precede the group's sub-commands.
                commands.insert(0, command)
            else:
                commands.append(command)
        command = None

    # Authentication: either half of the type/string pair may change;
    # default the other half from the full proposed args.
    auth_type = delta.get('auth_type', None)
    auth_string = delta.get('auth_string', None)
    if auth_type or auth_string:
        if not auth_type:
            auth_type = args['auth_type']
        elif not auth_string:
            auth_string = args['auth_string']
        if auth_type == 'md5':
            command = 'authentication md5 key-string {0}'.format(auth_string)
            commands.append(command)
        elif auth_type == 'text':
            command = 'authentication text {0}'.format(auth_string)
            commands.append(command)

    # If the group number itself did not change we still must enter the
    # hsrp sub-mode before emitting any sub-commands.
    if commands and not group:
        commands.insert(0, 'hsrp {0}'.format(args['group']))

    # 'hsrp version' is an interface-level command, so it goes (with the
    # interface line) ahead of the group commands.
    version = delta.get('version', None)
    if version:
        if version == '2':
            command = 'hsrp version 2'
        elif version == '1':
            command = 'hsrp version 1'
        commands.insert(0, command)
        commands.insert(0, 'interface {0}'.format(interface))

    if commands:
        if not commands[0].startswith('interface'):
            commands.insert(0, 'interface {0}'.format(interface))

    return commands
+
+
def is_default(interface, module):
    """Classify an interface's config state from 'show run interface'.

    Returns True when the interface carries only its bare 'interface'
    line (default config), False when extra config is present, and the
    string 'DNE' when the interface does not exist.
    """
    command = 'show run interface {0}'.format(interface)

    try:
        body = execute_show_command(command, module)[0]
        if 'invalid' in body.lower():
            return 'DNE'
        else:
            raw_list = body.split('\n')
            # if the 'interface ...' header is the last line, no
            # sub-configuration follows it
            if raw_list[-1].startswith('interface'):
                return True
            else:
                return False
    except (KeyError):
        return 'DNE'
+
+
def validate_config(body, vip, module):
    """Fail the module when the device rejected the VIP (the CLI output
    reports an invalid — typically duplicate — IP address)."""
    joined = ''.join(body)
    if 'invalid ip address' in joined.lower():
        module.fail_json(msg="Invalid VIP. Possible duplicate IP address.",
                         vip=vip)
+
+
def validate_params(param, module):
    """Range-check the 'group' and 'priority' module params.

    HSRPv1 groups are limited to 0-255, HSRPv2 to 0-4095; priority is
    always 0-255. Out-of-range (or non-integer) values fail the module.
    """
    value = module.params[param]
    version = module.params['version']

    if param == 'group':
        try:
            group = int(value)  # non-integer input raises ValueError too
            out_of_v1_range = (group < 0 or group > 255) and version == '1'
            if out_of_v1_range or group < 0 or group > 4095:
                raise ValueError
        except ValueError:
            module.fail_json(msg="Warning! 'group' must be an integer between"
                                 " 0 and 255 when version 1 and up to 4095 "
                                 "when version 2.", group=value,
                             version=version)
    elif param == 'priority':
        try:
            if not 0 <= int(value) <= 255:
                raise ValueError
        except ValueError:
            module.fail_json(msg="Warning! 'priority' must be an integer "
                                 "between 0 and 255", priority=value)
+
+
def main():
    """Entry point for nxos_hsrp.

    Validates input, computes the delta between proposed and existing
    HSRP settings, applies or removes the configuration (honoring check
    mode), and reports proposed/existing/end state.

    Fixed for Python 3: dict.iteritems() (removed in py3) replaced with
    .items(), which also works on Python 2.
    """
    argument_spec = dict(
        group=dict(required=True, type='str'),
        interface=dict(required=True),
        version=dict(choices=['1', '2'], default='2', required=False),
        priority=dict(type='str', required=False),
        preempt=dict(type='str', choices=['disabled', 'enabled'],
                     required=False),
        vip=dict(type='str', required=False),
        auth_type=dict(choices=['text', 'md5'], required=False),
        auth_string=dict(type='str', required=False),
        state=dict(choices=['absent', 'present'], required=False,
                   default='present'),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    interface = module.params['interface'].lower()
    group = module.params['group']
    version = module.params['version']
    state = module.params['state']
    priority = module.params['priority']
    preempt = module.params['preempt']
    vip = module.params['vip']
    auth_type = module.params['auth_type']
    auth_string = module.params['auth_string']

    transport = module.params['transport']

    if state == 'present' and not vip:
        module.fail_json(msg='the "vip" param is required when state=present')

    for param in ['group', 'priority']:
        if module.params[param] is not None:
            validate_params(param, module)

    intf_type = get_interface_type(interface)
    if (intf_type != 'ethernet' and transport == 'cli'):
        # over ssh we can cheaply verify non-physical interfaces exist
        if is_default(interface, module) == 'DNE':
            module.fail_json(msg='That interface does not exist yet. Create '
                                 'it first.', interface=interface)
        if intf_type == 'loopback':
            module.fail_json(msg="Loopback interfaces don't support HSRP.",
                             interface=interface)

    mode = get_interface_mode(interface, intf_type, module)
    if mode == 'layer2':
        module.fail_json(msg='That interface is a layer2 port.\nMake it '
                             'a layer 3 port first.', interface=interface)

    if auth_type or auth_string:
        if not (auth_type and auth_string):
            module.fail_json(msg='When using auth parameters, you need BOTH '
                                 'auth_type AND auth_string.')

    args = dict(group=group, version=version, priority=priority,
                preempt=preempt, vip=vip, auth_type=auth_type,
                auth_string=auth_string)

    proposed = dict((k, v) for k, v in args.items() if v is not None)

    existing = get_hsrp_group(group, interface, module)

    # This will enforce better practice with md5 and hsrp version.
    if proposed.get('auth_type', None) == 'md5':
        if proposed['version'] == '1':
            module.fail_json(msg="It's recommended to use HSRP v2 "
                                 "when auth_type=md5")

    elif not proposed.get('auth_type', None) and existing:
        if (proposed['version'] == '1' and
                existing['auth_type'] == 'md5'):
            module.fail_json(msg="Existing auth_type is md5. It's recommended "
                                 "to use HSRP v2 when using md5")

    changed = False
    end_state = existing
    commands = []
    if state == 'present':
        delta = dict(
            set(proposed.items()).difference(existing.items()))
        if delta:
            command = get_commands_config_hsrp(delta, interface, args)
            commands.extend(command)

    elif state == 'absent':
        if existing:
            command = get_commands_remove_hsrp(group, interface)
            commands.extend(command)

    if commands:
        if module.check_mode:
            module.exit_json(changed=True, commands=commands)
        else:
            body = execute_config_command(commands, module)
            if transport == 'cli':
                # ssh output can reveal a rejected VIP (duplicate IP)
                validate_config(body, vip, module)
            changed = True
            end_state = get_hsrp_group(group, interface, module)
            # drop the 'configure' prefix the cli fallback prepends
            if 'configure' in commands:
                commands.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = commands
    results['changed'] = changed

    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_igmp.py b/network/nxos/nxos_igmp.py
new file mode 100644
index 00000000000..3cd5a0dbd20
--- /dev/null
+++ b/network/nxos/nxos_igmp.py
@@ -0,0 +1,399 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_igmp
+version_added: "2.2"
+short_description: Manages IGMP global configuration.
+description:
+ - Manages IGMP global configuration configuration settings.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - When C(state=default), all supported params will be reset to a
+ default state.
+ - If restart is set to true with other params set, the restart will happen
+ last, i.e. after the configuration takes place.
+options:
+ flush_routes:
+ description:
+ - Removes routes when the IGMP process is restarted. By default,
+ routes are not flushed.
+ required: false
+ default: null
+ choices: ['true', 'false']
+ enforce_rtr_alert:
+ description:
+ - Enables or disables the enforce router alert option check for
+ IGMPv2 and IGMPv3 packets.
+ required: false
+ default: null
+ choices: ['true', 'false']
+ restart:
+ description:
+ - Restarts the igmp process (using an exec config command).
+ required: false
+ default: null
+ choices: ['true', 'false']
+ state:
+ description:
+ - Manages desired state of the resource.
+ required: false
+ default: present
+ choices: ['present', 'default']
+'''
+EXAMPLES = '''
+- name: Default igmp global params (all params except restart)
+ nxos_igmp:
+ state: default
+ host: "{{ inventory_hostname }}"
+
+- name: Ensure the following igmp global config exists on the device
+ nxos_igmp:
+ flush_routes: true
+ enforce_rtr_alert: true
+ host: "{{ inventory_hostname }}"
+
+- name: Restart the igmp process
+ nxos_igmp:
+ restart: true
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"enforce_rtr_alert": true, "flush_routes": true}
+existing:
+ description: k/v pairs of existing IGMP configuration
+ returned: verbose mode
+ type: dict
+ sample: {"enforce_rtr_alert": true, "flush_routes": false}
+end_state:
+ description: k/v pairs of IGMP configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"enforce_rtr_alert": true, "flush_routes": true}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["ip igmp flush-routes"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Coerce *val* into a list (None -> [], scalar -> [scalar],
    list/tuple -> shallow-copied list)."""
    if isinstance(val, (list, tuple)):
        return list(val)
    return [] if val is None else [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass shared by the migrated nxos modules.

    Adds section-aware helpers: expanding a hierarchical section into its
    descendant lines, lookup by parent path, and adding indented child
    commands under parent commands.
    """

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all of its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Find the ConfigLine whose text and full parent chain match *path*;
        # returns None implicitly when no line matches.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of ConfigLine objects back into raw config text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Return the config block at *path* as text, or [] when absent.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Like get_section() but returns ConfigLine objects; raises
        # ValueError when the path does not exist in the config.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Adds one or lines of configuration
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Instantiate the module via the legacy get_module() factory when it
    was importable, otherwise via NetworkModule (see the try/except
    import at the top of the file)."""
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device running-config wrapped in a CustomNetworkConfig.

    Uses the user-supplied 'config' param when given; otherwise pulls the
    config from the device, falling back to the newer module.config API.

    NOTE(review): the include_defaults *argument* is never read; the
    'include_defaults' module param is used instead — confirm intended.
    """
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
+
def load_config(module, candidate):
    """Diff *candidate* against the device config and apply the delta.

    Returns a result dict with 'changed' and, when commands were sent,
    'updates'. Honors check mode and the 'save' module param.
    """
    config = get_config(module)

    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            try:
                module.configure(commands)
            except AttributeError:
                # transport object exposes config() instead of configure()
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
# Maps each module param to the NX-OS global command it controls.
PARAM_TO_COMMAND_KEYMAP = {
    'flush_routes': 'ip igmp flush-routes',
    'enforce_rtr_alert': 'ip igmp enforce-router-alert'
}
+
+
def get_value(arg, config):
    """Return True when the global command mapped to *arg* appears in
    *config* (the running-config text); False otherwise, including when
    config is not a string."""
    pattern = re.compile(r'{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]),
                         re.M)
    try:
        return bool(pattern.search(config))
    except TypeError:
        # config was not a string (e.g. None): treat as "not configured"
        return False
+
+
def get_existing(module, args):
    """Return {arg: bool} for each arg in *args*, read from the device's
    running config via get_value()."""
    existing = {}
    config = str(get_config(module))

    for arg in args:
        existing[arg] = get_value(arg, config)
    return existing
+
+
def invoke(name, *args, **kwargs):
    """Look up *name* in module globals and call it with the given
    arguments; silently returns None when the name is not defined."""
    func = globals().get(name)
    return func(*args, **kwargs) if func else None
+
+
def get_commands(module, existing, proposed, candidate):
    """Compute the IGMP global commands implied by *proposed* and add
    them to *candidate* (a CustomNetworkConfig).

    state=default negates anything currently configured; state=present
    adds commands whose flag is True and negates those set False. A
    requested restart is appended last, after any config changes.

    Fixed for Python 3: dict.iteritems() replaced with .items().
    """
    commands = list()
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
    if module.params['state'] == 'default':
        # reset: negate anything that is currently configured
        for key, value in proposed_commands.items():
            if existing_commands.get(key):
                commands.append('no {0}'.format(key))
    else:
        for key, value in proposed_commands.items():
            if value is True:
                commands.append(key)
            else:
                if existing_commands.get(key):
                    commands.append('no {0}'.format(key))

    if module.params['restart']:
        commands.append('restart igmp')

    if commands:
        parents = []
        candidate.add(commands, parents=parents)
+
+
def apply_key_map(key_map, table):
    """Rename the keys of *table* via *key_map*, dropping unmapped keys.

    Values are passed through unchanged (unlike the nxos_hsrp variant,
    no str() conversion). The original if value/else branches stored the
    identical expression, so the redundant conditional is removed.
    """
    new_dict = {}
    for key, value in table.items():
        new_key = key_map.get(key)
        if new_key:
            new_dict[new_key] = value
    return new_dict
+
+
def main():
    """Entry point for nxos_igmp.

    Diffs proposed IGMP global settings against the device, loads the
    delta through a candidate config, and optionally restarts the igmp
    process (restart happens last, per the module notes).

    Fixed for Python 3: dict.iteritems() replaced with .items().
    """
    argument_spec = dict(
        flush_routes=dict(type='bool'),
        enforce_rtr_alert=dict(type='bool'),
        restart=dict(type='bool', default=False),
        state=dict(choices=['present', 'default'], default='present'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    state = module.params['state']
    restart = module.params['restart']

    if (state == 'default' and (module.params['flush_routes'] is not None or
            module.params['enforce_rtr_alert'] is not None)):
        module.fail_json(msg='When state=default other params have no effect.')

    args = [
        "flush_routes",
        "enforce_rtr_alert",
    ]

    existing = invoke('get_existing', module, args)
    end_state = existing

    proposed = dict((k, v) for k, v in module.params.items()
                    if v is not None and k in args)

    proposed_args = proposed.copy()
    if state == 'default':
        # defaulting means turning every supported flag off
        proposed_args = dict((k, False) for k in args)

    result = {}
    if (state == 'present' or (state == 'default' and
            True in existing.values()) or restart):
        candidate = CustomNetworkConfig(indent=3)
        invoke('get_commands', module, existing, proposed_args, candidate)

        try:
            response = load_config(module, candidate)
            result.update(response)
        except ShellError:
            exc = get_exception()
            module.fail_json(msg=str(exc))
    else:
        result['updates'] = []

    if restart:
        proposed['restart'] = restart
    result['connected'] = module.connected
    if module._verbosity > 0:
        # verbose runs re-read device state and echo the full diff context
        end_state = invoke('get_existing', module, args)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed

    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_igmp_interface.py b/network/nxos/nxos_igmp_interface.py
new file mode 100644
index 00000000000..d5e3226d346
--- /dev/null
+++ b/network/nxos/nxos_igmp_interface.py
@@ -0,0 +1,910 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_igmp_interface
+version_added: "2.2"
+short_description: Manages IGMP interface configuration.
+description:
+ - Manages IGMP interface configuration settings.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - When C(state=default), supported params will be reset to a default state.
+ These include C(version), C(startup_query_interval),
+ C(startup_query_count), C(robustness), C(querier_timeout), C(query_mrt),
+ C(query_interval), C(last_member_qrt), C(last_member_query_count),
+ C(group_timeout), C(report_llg), and C(immediate_leave).
+ - When C(state=absent), all configs for C(oif_prefix), C(oif_source), and
+ C(oif_routemap) will be removed.
+ - PIM must be enabled to use this module.
+ - This module is for Layer 3 interfaces.
+  - Route-map check not performed (same as CLI) when configuring
+    route-map with 'static-oif'
+ - If restart is set to true with other params set, the restart will happen
+ last, i.e. after the configuration takes place.
+options:
+ interface:
+ description:
+ - The full interface name for IGMP configuration.
+ e.g. I(Ethernet1/2).
+ required: true
+ version:
+ description:
+ - IGMP version. It can be 2 or 3.
+ required: false
+ default: null
+ choices: ['2', '3']
+ startup_query_interval:
+ description:
+ - Query interval used when the IGMP process starts up.
+ The range is from 1 to 18000. The default is 31.
+ required: false
+ default: null
+ startup_query_count:
+ description:
+ - Query count used when the IGMP process starts up.
+ The range is from 1 to 10. The default is 2.
+ required: false
+ default: null
+ robustness:
+ description:
+ - Sets the robustness variable. Values can range from 1 to 7.
+ The default is 2.
+ required: false
+ default: null
+ querier_timeout:
+ description:
+ - Sets the querier timeout that the software uses when deciding
+ to take over as the querier. Values can range from 1 to 65535
+ seconds. The default is 255 seconds.
+ required: false
+ default: null
+ query_mrt:
+ description:
+ - Sets the response time advertised in IGMP queries.
+ Values can range from 1 to 25 seconds. The default is 10 seconds.
+ required: false
+ default: null
+ query_interval:
+ description:
+ - Sets the frequency at which the software sends IGMP host query
+ messages. Values can range from 1 to 18000 seconds.
+        The default is 125 seconds.
+ required: false
+ default: null
+ last_member_qrt:
+ description:
+ - Sets the query interval waited after sending membership reports
+ before the software deletes the group state. Values can range
+ from 1 to 25 seconds. The default is 1 second.
+ required: false
+ default: null
+ last_member_query_count:
+ description:
+ - Sets the number of times that the software sends an IGMP query
+ in response to a host leave message.
+ Values can range from 1 to 5. The default is 2.
+ required: false
+ default: null
+ group_timeout:
+ description:
+ - Sets the group membership timeout for IGMPv2.
+ Values can range from 3 to 65,535 seconds.
+ The default is 260 seconds.
+ required: false
+ default: null
+ report_llg:
+ description:
+ - Configures report-link-local-groups.
+ Enables sending reports for groups in 224.0.0.0/24.
+ Reports are always sent for nonlink local groups.
+ By default, reports are not sent for link local groups.
+ required: false
+ choices: ['true', 'false']
+ default: false
+ immediate_leave:
+ description:
+ - Enables the device to remove the group entry from the multicast
+ routing table immediately upon receiving a leave message for
+ the group. Use this command to minimize the leave latency of
+ IGMPv2 group memberships on a given IGMP interface because the
+ device does not send group-specific queries.
+ The default is disabled.
+ required: false
+ choices: ['true', 'false']
+ default: false
+ oif_routemap:
+ description:
+ - Configure a routemap for static outgoing interface (OIF).
+ required: false
+ default: null
+ oif_prefix:
+ description:
+ - Configure a prefix for static outgoing interface (OIF).
+ required: false
+ default: null
+ oif_source:
+ description:
+ - Configure a source for static outgoing interface (OIF).
+ required: false
+ default: null
+ restart:
+ description:
+ - Restart IGMP.
+ required: false
+ choices: ['true', 'false']
+ default: null
+ state:
+ description:
+ - Manages desired state of the resource.
+ required: false
+ default: present
+ choices: ['present', 'default']
+'''
+EXAMPLES = '''
+- nxos_igmp_interface:
+ interface: ethernet1/32
+ startup_query_interval: 30
+ state: present
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"asn": "65535", "router_id": "1.1.1.1", "vrf": "test"}
+existing:
+    description: k/v pairs of existing IGMP interface configuration
+ type: dict
+ sample: {"asn": "65535", "bestpath_always_compare_med": false,
+ "bestpath_aspath_multipath_relax": false,
+ "bestpath_compare_neighborid": false,
+ "bestpath_compare_routerid": false,
+ "bestpath_cost_community_ignore": false,
+ "bestpath_med_confed": false,
+ "bestpath_med_missing_as_worst": false,
+ "bestpath_med_non_deterministic": false, "cluster_id": "",
+ "confederation_id": "", "confederation_peers": "",
+ "graceful_restart": true, "graceful_restart_helper": false,
+ "graceful_restart_timers_restart": "120",
+ "graceful_restart_timers_stalepath_time": "300", "local_as": "",
+ "log_neighbor_changes": false, "maxas_limit": "",
+ "neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
+ "router_id": "11.11.11.11", "suppress_fib_pending": false,
+ "timer_bestpath_limit": "", "timer_bgp_hold": "180",
+ "timer_bgp_keepalive": "60", "vrf": "test"}
+end_state:
+    description: k/v pairs of IGMP interface configuration after module execution
+ returned: always
+ type: dict
+ sample: {"asn": "65535", "bestpath_always_compare_med": false,
+ "bestpath_aspath_multipath_relax": false,
+ "bestpath_compare_neighborid": false,
+ "bestpath_compare_routerid": false,
+ "bestpath_cost_community_ignore": false,
+ "bestpath_med_confed": false,
+ "bestpath_med_missing_as_worst": false,
+ "bestpath_med_non_deterministic": false, "cluster_id": "",
+ "confederation_id": "", "confederation_peers": "",
+ "graceful_restart": true, "graceful_restart_helper": false,
+ "graceful_restart_timers_restart": "120",
+ "graceful_restart_timers_stalepath_time": "300", "local_as": "",
+ "log_neighbor_changes": false, "maxas_limit": "",
+ "neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
+ "router_id": "1.1.1.1", "suppress_fib_pending": false,
+ "timer_bestpath_limit": "", "timer_bgp_hold": "180",
+ "timer_bgp_keepalive": "60", "vrf": "test"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import json
+import collections
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+def get_cli_body_ssh(command, response, module):
+ """Get response for when transport=cli. This is kind of a hack and mainly
+ needed because these modules were originally written for NX-API. And
+ not every command supports "| json" when using cli/ssh. As such, we assume
+ if | json returns an XML string, it is a valid command, but that the
+ resource doesn't exist yet. Instead, the output will be a raw string
+ when issuing commands containing 'show run'.
+ """
+ if 'xml' in response[0]:
+ body = []
+ elif 'show run' in command:
+ body = response
+ else:
+ try:
+ response = response[0].replace(command + '\n\n', '').strip()
+ body = [json.loads(response)]
+ except ValueError:
+ module.fail_json(msg='Command does not support JSON output',
+ command=command)
+ return body
+
+
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+ if module.params['transport'] == 'cli':
+ command += ' | json'
+ cmds = [command]
+ response = execute_show(cmds, module)
+ body = get_cli_body_ssh(command, response, module)
+ elif module.params['transport'] == 'nxapi':
+ cmds = [command]
+ body = execute_show(cmds, module, command_type=command_type)
+
+ return body
+
+
+def get_interface_mode(interface, intf_type, module):
+ command = 'show interface {0}'.format(interface)
+ interface = {}
+ mode = 'unknown'
+
+ if intf_type in ['ethernet', 'portchannel']:
+ body = execute_show_command(command, module)[0]
+ interface_table = body['TABLE_interface']['ROW_interface']
+ mode = str(interface_table.get('eth_mode', 'layer3'))
+ if mode == 'access' or mode == 'trunk':
+ mode = 'layer2'
+ elif intf_type == 'loopback' or intf_type == 'svi':
+ mode = 'layer3'
+ return mode
+
+
+def get_interface_type(interface):
+ if interface.upper().startswith('ET'):
+ return 'ethernet'
+ elif interface.upper().startswith('VL'):
+ return 'svi'
+ elif interface.upper().startswith('LO'):
+ return 'loopback'
+ elif interface.upper().startswith('MG'):
+ return 'management'
+ elif interface.upper().startswith('MA'):
+ return 'management'
+ elif interface.upper().startswith('PO'):
+ return 'portchannel'
+ else:
+ return 'unknown'
+
+
+def apply_key_map(key_map, table):
+ new_dict = {}
+ for key, value in table.items():
+ new_key = key_map.get(key)
+ if new_key:
+ value = table.get(key)
+ if value:
+ new_dict[new_key] = value
+ else:
+ new_dict[new_key] = value
+ return new_dict
+
+
+def flatten_list(command_lists):
+ flat_command_list = []
+ for command in command_lists:
+ if isinstance(command, list):
+ flat_command_list.extend(command)
+ else:
+ flat_command_list.append(command)
+ return flat_command_list
+
+
+def get_igmp_interface(module, interface):
+ command = 'show ip igmp interface {0}'.format(interface)
+ igmp = {}
+
+ key_map = {
+ 'IGMPVersion': 'version',
+ 'ConfiguredStartupQueryInterval': 'startup_query_interval',
+ 'StartupQueryCount': 'startup_query_count',
+ 'RobustnessVariable': 'robustness',
+ 'QuerierTimeout': 'querier_timeout',
+ 'ConfiguredMaxResponseTime': 'query_mrt',
+ 'ConfiguredQueryInterval': 'query_interval',
+ 'LastMemberMTR': 'last_member_qrt',
+ 'LastMemberQueryCount': 'last_member_query_count',
+ 'ConfiguredGroupTimeout': 'group_timeout'
+ }
+
+ body = execute_show_command(command, module)[0]
+
+ if body:
+ resource = body['TABLE_vrf']['ROW_vrf']['TABLE_if']['ROW_if']
+ igmp = apply_key_map(key_map, resource)
+ report_llg = str(resource['ReportingForLinkLocal'])
+ if report_llg == 'true':
+ igmp['report_llg'] = True
+ elif report_llg == 'false':
+ igmp['report_llg'] = False
+
+ immediate_leave = str(resource['ImmediateLeave']) # returns en or dis
+ if immediate_leave == 'en':
+ igmp['immediate_leave'] = True
+ elif immediate_leave == 'dis':
+ igmp['immediate_leave'] = False
+
+ # the next block of code is used to retrieve anything with:
+ # ip igmp static-oif *** i.e.. could be route-map ROUTEMAP
+ # or PREFIX source , etc.
+ command = 'show run interface {0} | inc oif'.format(interface)
+
+ body = execute_show_command(
+ command, module, command_type='cli_show_ascii')[0]
+
+ staticoif = []
+ if body:
+ split_body = body.split('\n')
+            route_map_regex = ('.*ip igmp static-oif route-map\s+'
+                               '(?P<route_map>\S+).*')
+            prefix_source_regex = ('.*ip igmp static-oif\s+(?P<prefix>'
+                                   '((\d+.){3}\d+))(\ssource\s'
+                                   '(?P<source>\S+))?.*')
+
+ for line in split_body:
+ temp = {}
+ try:
+ match_route_map = re.match(route_map_regex, line, re.DOTALL)
+ route_map = match_route_map.groupdict()['route_map']
+ except AttributeError:
+ route_map = ''
+
+ try:
+ match_prefix_source = re.match(
+ prefix_source_regex, line, re.DOTALL)
+ prefix_source_group = match_prefix_source.groupdict()
+ prefix = prefix_source_group['prefix']
+ source = prefix_source_group['source']
+ except AttributeError:
+ prefix = ''
+ source = ''
+
+ if route_map:
+ temp['route_map'] = route_map
+ if prefix:
+ temp['prefix'] = prefix
+ if source:
+ temp['source'] = source
+ if temp:
+ staticoif.append(temp)
+
+ igmp['oif_routemap'] = None
+ igmp['oif_prefix_source'] = []
+
+ if staticoif:
+ if len(staticoif) == 1 and staticoif[0].get('route_map'):
+ igmp['oif_routemap'] = staticoif[0]['route_map']
+ else:
+ igmp['oif_prefix_source'] = staticoif
+
+ return igmp
+
+
+def config_igmp_interface(delta, found_both, found_prefix):
+ CMDS = {
+ 'version': 'ip igmp version {0}',
+ 'startup_query_interval': 'ip igmp startup-query-interval {0}',
+ 'startup_query_count': 'ip igmp startup-query-count {0}',
+ 'robustness': 'ip igmp robustness-variable {0}',
+ 'querier_timeout': 'ip igmp querier-timeout {0}',
+ 'query_mrt': 'ip igmp query-max-response-time {0}',
+ 'query_interval': 'ip igmp query-interval {0}',
+ 'last_member_qrt': 'ip igmp last-member-query-response-time {0}',
+ 'last_member_query_count': 'ip igmp last-member-query-count {0}',
+ 'group_timeout': 'ip igmp group-timeout {0}',
+ 'report_llg': 'ip igmp report-link-local-groups',
+ 'immediate_leave': 'ip igmp immediate-leave',
+ 'oif_prefix_source': 'ip igmp static-oif {0} source {1} ',
+ 'oif_routemap': 'ip igmp static-oif route-map {0}',
+ 'oif_prefix': 'ip igmp static-oif {0}',
+ }
+
+ commands = []
+ command = None
+
+ for key, value in delta.iteritems():
+ if key == 'oif_source' or found_both or found_prefix:
+ pass
+ elif key == 'oif_prefix':
+ if delta.get('oif_source'):
+ command = CMDS.get('oif_prefix_source').format(
+ delta.get('oif_prefix'), delta.get('oif_source'))
+ else:
+ command = CMDS.get('oif_prefix').format(
+ delta.get('oif_prefix'))
+ elif value:
+ command = CMDS.get(key).format(value)
+ elif not value:
+ command = 'no {0}'.format(CMDS.get(key).format(value))
+
+ if command:
+ if command not in commands:
+ commands.append(command)
+ command = None
+
+ return commands
+
+
+def get_igmp_interface_defaults():
+ version = '2'
+ startup_query_interval = '31'
+ startup_query_count = '2'
+ robustness = '2'
+ querier_timeout = '255'
+ query_mrt = '10'
+ query_interval = '125'
+ last_member_qrt = '1'
+ last_member_query_count = '2'
+ group_timeout = '260'
+ report_llg = False
+ immediate_leave = False
+
+ args = dict(version=version, startup_query_interval=startup_query_interval,
+ startup_query_count=startup_query_count, robustness=robustness,
+ querier_timeout=querier_timeout, query_mrt=query_mrt,
+ query_interval=query_interval, last_member_qrt=last_member_qrt,
+ last_member_query_count=last_member_query_count,
+ group_timeout=group_timeout, report_llg=report_llg,
+ immediate_leave=immediate_leave)
+
+ default = dict((param, value) for (param, value) in args.iteritems()
+ if value is not None)
+
+ return default
+
+
+def config_default_igmp_interface(existing, delta, found_both, found_prefix):
+ commands = []
+ proposed = get_igmp_interface_defaults()
+ delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
+ if delta:
+ command = config_igmp_interface(delta, found_both, found_prefix)
+
+ if command:
+ for each in command:
+ commands.append(each)
+
+ return commands
+
+
+def config_remove_oif(existing, existing_oif_prefix_source):
+ commands = []
+ command = None
+ if existing.get('routemap'):
+ command = 'no ip igmp static-oif route-map {0}'.format(
+ existing.get('routemap'))
+ if existing_oif_prefix_source:
+ for each in existing_oif_prefix_source:
+ if each.get('prefix') and each.get('source'):
+ command = 'no ip igmp static-oif {0} source {1} '.format(
+ each.get('prefix'), each.get('source')
+ )
+ elif each.get('prefix'):
+ command = 'no ip igmp static-oif {0}'.format(
+ each.get('prefix')
+ )
+ if command:
+ commands.append(command)
+ command = None
+
+ return commands
+
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def main():
+ argument_spec = dict(
+ interface=dict(required=True, type='str'),
+ version=dict(required=False, type='str'),
+ startup_query_interval=dict(required=False, type='str'),
+ startup_query_count=dict(required=False, type='str'),
+ robustness=dict(required=False, type='str'),
+ querier_timeout=dict(required=False, type='str'),
+ query_mrt=dict(required=False, type='str'),
+ query_interval=dict(required=False, type='str'),
+ last_member_qrt=dict(required=False, type='str'),
+ last_member_query_count=dict(required=False, type='str'),
+ group_timeout=dict(required=False, type='str'),
+ report_llg=dict(type='bool'),
+ immediate_leave=dict(type='bool'),
+ oif_routemap=dict(required=False, type='str'),
+ oif_prefix=dict(required=False, type='str'),
+ oif_source=dict(required=False, type='str'),
+ restart=dict(type='bool', default=False),
+ state=dict(choices=['present', 'absent', 'default'],
+ default='present'),
+ include_defaults=dict(default=True),
+ config=dict(),
+ save=dict(type='bool', default=False)
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ state = module.params['state']
+ interface = module.params['interface']
+ oif_prefix = module.params['oif_prefix']
+ oif_source = module.params['oif_source']
+ oif_routemap = module.params['oif_routemap']
+
+ if oif_source:
+ if not oif_prefix:
+ module.fail_json(msg='oif_prefix required when setting oif_source')
+
+ intf_type = get_interface_type(interface)
+ if get_interface_mode(interface, intf_type, module) == 'layer2':
+ module.fail_json(msg='this module only works on Layer 3 interfaces')
+
+ if oif_prefix and oif_routemap:
+ module.fail_json(msg='cannot use oif_prefix AND oif_routemap.'
+ ' select one.')
+
+ existing = get_igmp_interface(module, interface)
+ existing_copy = existing.copy()
+ end_state = existing_copy
+
+ if not existing.get('version'):
+ module.fail_json(msg='pim needs to be enabled on the interface')
+
+ existing_oif_prefix_source = existing.get('oif_prefix_source')
+ # not json serializable
+ existing.pop('oif_prefix_source')
+
+ if oif_routemap and existing_oif_prefix_source:
+ module.fail_json(msg='Delete static-oif configurations on this '
+ 'interface if you want to use a routemap')
+
+ if oif_prefix and existing.get('oif_routemap'):
+ module.fail_json(msg='Delete static-oif route-map configuration '
+ 'on this interface if you want to config '
+ 'static entries')
+
+ args = [
+ 'version',
+ 'startup_query_interval',
+ 'startup_query_count',
+ 'robustness',
+ 'querier_timeout',
+ 'query_mrt',
+ 'query_interval',
+ 'last_member_qrt',
+ 'last_member_query_count',
+ 'group_timeout',
+ 'report_llg',
+ 'immediate_leave',
+ 'oif_routemap',
+ 'oif_prefix',
+ 'oif_source'
+ ]
+
+ changed = False
+ commands = []
+ proposed = dict((k, v) for k, v in module.params.iteritems()
+ if v is not None and k in args)
+
+ CANNOT_ABSENT = ['version', 'startup_query_interval',
+ 'startup_query_count', 'robustness', 'querier_timeout',
+ 'query_mrt', 'query_interval', 'last_member_qrt',
+ 'last_member_query_count', 'group_timeout', 'report_llg',
+ 'immediate_leave']
+
+ if state == 'absent':
+ for each in CANNOT_ABSENT:
+ if each in proposed:
+ module.fail_json(msg='only params: oif_prefix, oif_source, '
+ 'oif_routemap can be used when '
+ 'state=absent')
+
+ # delta check for all params except oif_prefix and oif_source
+ delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
+
+ # now check to see there is a delta for prefix and source command option
+ found_both = False
+ found_prefix = False
+
+ if existing_oif_prefix_source:
+ if oif_prefix and oif_source:
+ for each in existing_oif_prefix_source:
+ if (oif_prefix == each.get('prefix') and
+ oif_source == each.get('source')):
+ found_both = True
+ if not found_both:
+ delta['prefix'] = oif_prefix
+ delta['source'] = oif_source
+ elif oif_prefix:
+ for each in existing_oif_prefix_source:
+ if oif_prefix == each.get('prefix') and not each.get('source'):
+ found_prefix = True
+ if not found_prefix:
+ delta['prefix'] = oif_prefix
+
+ if state == 'present':
+ if delta:
+ command = config_igmp_interface(delta, found_both, found_prefix)
+ if command:
+ commands.append(command)
+
+ elif state == 'default':
+ command = config_default_igmp_interface(existing, delta,
+ found_both, found_prefix)
+ if command:
+ commands.append(command)
+ elif state == 'absent':
+ command = None
+ if existing.get('oif_routemap') or existing_oif_prefix_source:
+ command = config_remove_oif(existing, existing_oif_prefix_source)
+
+ if command:
+ commands.append(command)
+
+ command = config_default_igmp_interface(existing, delta,
+ found_both, found_prefix)
+ if command:
+ commands.append(command)
+
+ if module.params['restart']:
+ commands.append('restart igmp')
+
+ cmds = []
+ results = {}
+ if commands:
+ commands.insert(0, ['interface {0}'.format(interface)])
+ cmds = flatten_list(commands)
+
+ if module.check_mode:
+ module.exit_json(changed=True, commands=cmds)
+ else:
+ execute_config_command(cmds, module)
+ changed = True
+ end_state = get_igmp_interface(module, interface)
+ if 'configure' in cmds:
+ cmds.pop(0)
+
+ results['proposed'] = proposed
+ results['existing'] = existing_copy
+ results['updates'] = cmds
+ results['changed'] = changed
+ results['end_state'] = end_state
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/network/nxos/nxos_igmp_snooping.py b/network/nxos/nxos_igmp_snooping.py
new file mode 100644
index 00000000000..7044adecd16
--- /dev/null
+++ b/network/nxos/nxos_igmp_snooping.py
@@ -0,0 +1,556 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_igmp_snooping
+version_added: "2.2"
+short_description: Manages IGMP snooping global configuration.
+description:
+ - Manages IGMP snooping global configuration.
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - When C(state=default), params will be reset to a default state.
+ - C(group_timeout) also accepts I(never) as an input.
+options:
+ snooping:
+ description:
+ - Enables/disables IGMP snooping on the switch.
+ required: false
+ default: null
+ choices: ['true', 'false']
+ group_timeout:
+ description:
+ - Group membership timeout value for all VLANs on the device.
+ Accepted values are integer in range 1-10080, I(never) and
+ I(default).
+ required: false
+ default: null
+ link_local_grp_supp:
+ description:
+ - Global link-local groups suppression.
+ required: false
+ default: null
+ choices: ['true', 'false']
+ report_supp:
+ description:
+ - Global IGMPv1/IGMPv2 Report Suppression.
+ required: false
+ default: null
+ v3_report_supp:
+ description:
+ - Global IGMPv3 Report Suppression and Proxy Reporting.
+ required: false
+ default: null
+ choices: ['true', 'false']
+ state:
+ description:
+ - Manage the state of the resource.
+ required: false
+ default: present
+ choices: ['present','default']
+'''
+
+EXAMPLES = '''
+# ensure igmp snooping params supported in this module are in their default state
+- nxos_igmp_snooping:
+ state: default
+    host: "{{ inventory_hostname }}"
+    username: "{{ un }}"
+    password: "{{ pwd }}"
+
+# ensure following igmp snooping params are in the desired state
+- nxos_igmp_snooping:
+ group_timeout: never
+ snooping: true
+ link_local_grp_supp: false
+ optimize_mcast_flood: false
+ report_supp: true
+ v3_report_supp: true
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"group_timeout": "50", "link_local_grp_supp": true,
+ "report_supp": false, "snooping": false, "v3_report_supp": false}
+existing:
+ description:
+ - k/v pairs of existing configuration
+ type: dict
+ sample: {"group_timeout": "never", "link_local_grp_supp": false,
+ "report_supp": true, "snooping": true, "v3_report_supp": true}
+end_state:
+ description: k/v pairs of configuration after module execution
+ returned: always
+ type: dict
+ sample: {"group_timeout": "50", "link_local_grp_supp": true,
+ "report_supp": false, "snooping": false, "v3_report_supp": false}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["ip igmp snooping link-local-groups-suppression",
+ "ip igmp snooping group-timeout 50",
+ "no ip igmp snooping report-suppression",
+ "no ip igmp snooping v3-report-suppression",
+ "no ip igmp snooping"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+def get_cli_body_ssh(command, response, module):
+ """Get response for when transport=cli. This is kind of a hack and mainly
+ needed because these modules were originally written for NX-API. And
+ not every command supports "| json" when using cli/ssh. As such, we assume
+ if | json returns an XML string, it is a valid command, but that the
+ resource doesn't exist yet. Instead, the output will be a raw string
+ when issuing commands containing 'show run'.
+ """
+ if 'xml' in response[0]:
+ body = []
+ elif 'show run' in command:
+ body = response
+ else:
+ try:
+ if isinstance(response[0], str):
+ response = response[0].replace(command + '\n\n', '').strip()
+ body = [json.loads(response[0])]
+ else:
+ body = response
+ except ValueError:
+ module.fail_json(msg='Command does not support JSON output',
+ command=command)
+ return body
+
+
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+ if module.params['transport'] == 'cli':
+ if 'show run' not in command:
+ command += ' | json'
+ cmds = [command]
+ response = execute_show(cmds, module)
+ body = get_cli_body_ssh(command, response, module)
+ elif module.params['transport'] == 'nxapi':
+ cmds = [command]
+ body = execute_show(cmds, module, command_type=command_type)
+
+ return body
+
+
+def flatten_list(command_lists):
+ flat_command_list = []
+ for command in command_lists:
+ if isinstance(command, list):
+ flat_command_list.extend(command)
+ else:
+ flat_command_list.append(command)
+ return flat_command_list
+
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def get_group_timeout(config):
+ command = 'ip igmp snooping group-timeout'
+    REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
+ value = ''
+ if command in config:
+ value = REGEX.search(config).group('value')
+ return value
+
+
+def get_snooping(config):
+ REGEX = re.compile(r'{0}$'.format('no ip igmp snooping'), re.M)
+ value = False
+ try:
+ if REGEX.search(config):
+ value = False
+ except TypeError:
+ value = True
+ return value
+
+
+def get_igmp_snooping(module):
+ command = 'show run all | include igmp.snooping'
+ existing = {}
+ body = execute_show_command(
+ command, module, command_type='cli_show_ascii')[0]
+
+ if body:
+ split_body = body.splitlines()
+
+ if 'no ip igmp snooping' in split_body:
+ existing['snooping'] = False
+ else:
+ existing['snooping'] = True
+
+ if 'no ip igmp snooping report-suppression' in split_body:
+ existing['report_supp'] = False
+ elif 'ip igmp snooping report-suppression' in split_body:
+ existing['report_supp'] = True
+
+ if 'no ip igmp snooping link-local-groups-suppression' in split_body:
+ existing['link_local_grp_supp'] = False
+ elif 'ip igmp snooping link-local-groups-suppression' in split_body:
+ existing['link_local_grp_supp'] = True
+
+ if 'ip igmp snooping v3-report-suppression' in split_body:
+ existing['v3_report_supp'] = True
+ else:
+ existing['v3_report_supp'] = False
+
+ existing['group_timeout'] = get_group_timeout(body)
+
+ return existing
+
+
+def config_igmp_snooping(delta, existing, default=False):
+ CMDS = {
+ 'snooping': 'ip igmp snooping',
+ 'group_timeout': 'ip igmp snooping group-timeout {}',
+ 'link_local_grp_supp': 'ip igmp snooping link-local-groups-suppression',
+ 'v3_report_supp': 'ip igmp snooping v3-report-suppression',
+ 'report_supp': 'ip igmp snooping report-suppression'
+ }
+
+ commands = []
+ command = None
+ for key, value in delta.iteritems():
+ if value:
+ if default and key == 'group_timeout':
+ if existing.get(key):
+ command = 'no ' + CMDS.get(key).format(existing.get(key))
+ else:
+ command = CMDS.get(key).format(value)
+ else:
+ command = 'no ' + CMDS.get(key).format(value)
+
+ if command:
+ commands.append(command)
+ command = None
+
+ return commands
+
+
+def get_igmp_snooping_defaults():
+ group_timeout = 'dummy'
+ report_supp = True
+ link_local_grp_supp = True
+ v3_report_supp = False
+ snooping = True
+
+ args = dict(snooping=snooping, link_local_grp_supp=link_local_grp_supp,
+ report_supp=report_supp, v3_report_supp=v3_report_supp,
+ group_timeout=group_timeout)
+
+ default = dict((param, value) for (param, value) in args.iteritems()
+ if value is not None)
+
+ return default
+
+
+def main():
+ argument_spec = dict(
+ snooping=dict(required=False, type='bool'),
+ group_timeout=dict(required=False, type='str'),
+ link_local_grp_supp=dict(required=False, type='bool'),
+ report_supp=dict(required=False, type='bool'),
+ v3_report_supp=dict(required=False, type='bool'),
+ state=dict(choices=['present', 'default'], default='present'),
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ snooping = module.params['snooping']
+ link_local_grp_supp = module.params['link_local_grp_supp']
+ report_supp = module.params['report_supp']
+ v3_report_supp = module.params['v3_report_supp']
+ group_timeout = module.params['group_timeout']
+ state = module.params['state']
+
+ args = dict(snooping=snooping, link_local_grp_supp=link_local_grp_supp,
+ report_supp=report_supp, v3_report_supp=v3_report_supp,
+ group_timeout=group_timeout)
+
+ proposed = dict((param, value) for (param, value) in args.iteritems()
+ if value is not None)
+
+ existing = get_igmp_snooping(module)
+ end_state = existing
+ changed = False
+
+ commands = []
+ if state == 'present':
+ delta = dict(
+ set(proposed.iteritems()).difference(existing.iteritems())
+ )
+ if delta:
+ command = config_igmp_snooping(delta, existing)
+ if command:
+ commands.append(command)
+ elif state == 'default':
+ proposed = get_igmp_snooping_defaults()
+ delta = dict(
+ set(proposed.iteritems()).difference(existing.iteritems())
+ )
+ if delta:
+ command = config_igmp_snooping(delta, existing, default=True)
+ if command:
+ commands.append(command)
+
+ cmds = flatten_list(commands)
+ results = {}
+ if cmds:
+ if module.check_mode:
+ module.exit_json(changed=True, commands=cmds)
+ else:
+ changed = True
+ execute_config_command(cmds, module)
+ end_state = get_igmp_snooping(module)
+ if 'configure' in cmds:
+ cmds.pop(0)
+
+ results['proposed'] = proposed
+ results['existing'] = existing
+ results['updates'] = cmds
+ results['changed'] = changed
+ results['end_state'] = end_state
+
+ module.exit_json(**results)
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_install_os.py b/network/nxos/nxos_install_os.py
new file mode 100644
index 00000000000..aa0c0505437
--- /dev/null
+++ b/network/nxos/nxos_install_os.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_install_os
+short_description: Set boot options like boot image and kickstart image.
+description:
+ - Install an operating system by setting the boot options like boot
+ image and kickstart image.
+notes:
+ - The module will fail due to timeout issues, but the install will go on
+ anyway. Ansible's block and rescue can be leveraged to handle this kind
+ of failure and check actual module results. See EXAMPLE for more about
+ this. The first task on the rescue block is needed to make sure the
+ device has completed all checks and it started to reboot. The second
+ task is needed to wait for the device to come back up. The last two tasks
+ are used to verify the installation process was successful.
+ - Do not include full file paths, just the name of the file(s) stored on
+ the top level flash directory.
+ - You must know if your platform supports taking a kickstart image as a
+ parameter. If supplied but not supported, errors may occur.
+ - This module attempts to install the software immediately,
+ which may trigger a reboot.
+ - In check mode, the module tells you if the current boot images are set
+ to the desired images.
+author:
+ - Jason Edelman (@jedelman8)
+    - Gabriele Gerbino (@GGabriele)
+version_added: 2.2
+options:
+ system_image_file:
+ description:
+ - Name of the system (or combined) image file on flash.
+ required: true
+ kickstart_image_file:
+ description:
+ - Name of the kickstart image file on flash.
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+- block:
+ - name: Install OS
+ nxos_install_os:
+ system_image_file: nxos.7.0.3.I2.2d.bin
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ transport: nxapi
+ rescue:
+ - name: Wait for device to perform checks
+ wait_for:
+ port: 22
+ state: stopped
+ timeout: 300
+ delay: 60
+ host: "{{ inventory_hostname }}"
+ - name: Wait for device to come back up
+ wait_for:
+ port: 22
+ state: started
+ timeout: 300
+ delay: 60
+ host: "{{ inventory_hostname }}"
+ - name: Check installed OS
+ nxos_command:
+ commands:
+ - show version
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+ transport: nxapi
+ register: output
+ - assert:
+ that:
+ - output['stdout'][0]['kickstart_ver_str'] == '7.0(3)I4(1)'
+'''
+
+RETURN = '''
+install_state:
+ returned: always
+ type: dictionary
+ sample: {
+ "kick": "n5000-uk9-kickstart.7.2.1.N1.1.bin",
+ "sys": "n5000-uk9.7.2.1.N1.1.bin",
+ "status": "This is the log of last installation.\n
+ Continuing with installation process, please wait.\n
+ The login will be disabled until the installation is completed.\n
+ Performing supervisor state verification. \n
+ SUCCESS\n
+ Supervisor non-disruptive upgrade successful.\n
+ Install has been successful.\n",
+ }
+'''
+
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
+def execute_show_command(command, module, command_type='cli_show_ascii'):
+ cmds = [command]
+ if module.params['transport'] == 'cli':
+ body = execute_show(cmds, module)
+ elif module.params['transport'] == 'nxapi':
+ body = execute_show(cmds, module, command_type=command_type)
+
+ return body
+
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def get_boot_options(module):
+ """Get current boot variables
+ like system image and kickstart image.
+ Returns:
+ A dictionary, e.g. { 'kick': router_kick.img, 'sys': 'router_sys.img'}
+ """
+ command = 'show boot'
+ body = execute_show_command(command, module)[0]
+ boot_options_raw_text = body.split('Boot Variables on next reload')[1]
+
+ if 'kickstart' in boot_options_raw_text:
+ kick_regex = r'kickstart variable = bootflash:/(\S+)'
+ sys_regex = r'system variable = bootflash:/(\S+)'
+
+ kick = re.search(kick_regex, boot_options_raw_text).group(1)
+ sys = re.search(sys_regex, boot_options_raw_text).group(1)
+ retdict = dict(kick=kick, sys=sys)
+ else:
+ nxos_regex = r'NXOS variable = bootflash:/(\S+)'
+ nxos = re.search(nxos_regex, boot_options_raw_text).group(1)
+ retdict = dict(sys=nxos)
+
+ command = 'show install all status'
+ retdict['status'] = execute_show_command(command, module)[0]
+
+ return retdict
+
+
+def already_set(current_boot_options, system_image_file, kickstart_image_file):
+ return current_boot_options.get('sys') == system_image_file \
+ and current_boot_options.get('kick') == kickstart_image_file
+
+
+def set_boot_options(module, image_name, kickstart=None):
+ """Set boot variables
+ like system image and kickstart image.
+ Args:
+ The main system image file name.
+ Keyword Args: many implementors may choose
+ to supply a kickstart parameter to specify a kickstart image.
+ """
+ commands = ['terminal dont-ask']
+ if kickstart is None:
+ commands.append('install all nxos %s' % image_name)
+ else:
+ commands.append(
+ 'install all system %s kickstart %s' % (image_name, kickstart))
+ execute_config_command(commands, module)
+
+
+def main():
+ argument_spec = dict(
+ system_image_file=dict(required=True),
+ kickstart_image_file=dict(required=False),
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ system_image_file = module.params['system_image_file']
+ kickstart_image_file = module.params['kickstart_image_file']
+
+ if kickstart_image_file == 'null':
+ kickstart_image_file = None
+
+ current_boot_options = get_boot_options(module)
+ changed = False
+ if not already_set(current_boot_options,
+ system_image_file,
+ kickstart_image_file):
+ changed = True
+
+ if not module.check_mode and changed == True:
+ set_boot_options(module,
+ system_image_file,
+ kickstart=kickstart_image_file)
+
+ if not already_set(install_state,
+ system_image_file,
+ kickstart_image_file):
+ module.fail_json(msg='Install not successful',
+ install_state=install_state)
+ else:
+ install_state = current_boot_options
+
+ module.exit_json(changed=changed, install_state=install_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_interface.py b/network/nxos/nxos_interface.py
index 46b9f4ef8e4..b65fb9d3109 100644
--- a/network/nxos/nxos_interface.py
+++ b/network/nxos/nxos_interface.py
@@ -16,14 +16,17 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
-
module: nxos_interface
version_added: "2.1"
-short_description: Manages physical attributes of interfaces
+short_description: Manages physical attributes of interfaces.
description:
- - Manages physical attributes of interfaces of NX-OS switches
+ - Manages physical attributes of interfaces of NX-OS switches.
author: Jason Edelman (@jedelman8)
notes:
- This module is also used to create logical interfaces such as
@@ -31,12 +34,21 @@
- Be cautious of platform specific idiosyncrasies. For example,
when you default a loopback interface, the admin state toggles
on certain versions of NX-OS.
+ - The M(nxos_overlay_global) C(anycast_gateway_mac) attribute must be
+ set before setting the C(fabric_forwarding_anycast_gateway) property.
options:
interface:
description:
- Full name of interface, i.e. Ethernet1/1, port-channel10.
required: true
default: null
+ interface_type:
+ description:
+ - Interface type to be unconfigured from the device.
+ required: false
+ default: null
+ choices: ['loopback', 'portchannel', 'svi', 'nve']
+ version_added: "2.2"
admin_state:
description:
- Administrative state of the interface.
@@ -50,10 +62,24 @@
default: null
mode:
description:
- - Manage I(Layer2) or I(Layer3) state of the interface.
+ - Manage Layer 2 or Layer 3 state of the interface.
required: false
default: null
choices: ['layer2','layer3']
+ ip_forward:
+ description:
+ - Enable/Disable ip forward feature on SVIs.
+ required: false
+ default: null
+ choices: ['enable','disable']
+ version_added: "2.2"
+ fabric_forwarding_anycast_gateway:
+ description:
+ - Associate SVI with anycast gateway under VLAN configuration mode.
+ required: false
+ default: null
+ choices: ['true','false']
+ version_added: "2.2"
state:
description:
- Specify desired state of the resource.
@@ -63,31 +89,49 @@
'''
EXAMPLES = '''
-# Ensure an interface is a Layer 3 port and that it has the proper description
-- nxos_interface: interface=Ethernet1/1 description='Configured by Ansible' mode=layer3 host={{ inventory_hostname }}
-
-# Admin down an interface
-- nxos_interface: interface=Ethernet2/1 host={{ inventory_hostname }} admin_state=down
+- name Ensure an interface is a Layer 3 port and that it has the proper description
+ nxos_interface:
+ interface: Ethernet1/1
+ description: 'Configured by Ansible'
+ mode: layer3
+ host: 68.170.147.165
+
+- name Admin down an interface
+ nxos_interface:
+ interface: Ethernet2/1
+ host: 68.170.147.165
+ admin_state: down
+
+- name Remove all loopback interfaces
+ nxos_interface:
+ interface: loopback
+ state: absent
+ host: 68.170.147.165
+
+- name Remove all logical interfaces
+ nxos_interface:
+ interface_type: "{{ item }} "
+ state: absent
+ host: "{{ inventory_hostname }}"
-# Remove all loopback interfaces
-- nxos_interface: interface=loopback state=absent host={{ inventory_hostname }}
-
-# Remove all logical interfaces
-- nxos_interface: interface={{ item }} state=absent host={{ inventory_hostname }}
with_items:
- loopback
- portchannel
- svi
-
-# Admin up all ethernet interfaces
-- nxos_interface: interface=ethernet host={{ inventory_hostname }} admin_state=up
-
-# Admin down ALL interfaces (physical and logical)
-- nxos_interface: interface=all host={{ inventory_hostname }} admin_state=down
-
+ - nve
+- name Admin up all ethernet interfaces
+ nxos_interface:
+ interface: ethernet
+ host: 68.170.147.165
+ admin_state: up
+
+- name Admin down ALL interfaces (physical and logical)
+ nxos_interface:
+ interface: all
+ host: 68.170.147.165
+ admin_state: down
'''
RETURN = '''
-
proposed:
description: k/v pairs of parameters passed into module
returned: always
@@ -96,17 +140,16 @@
existing:
description: k/v pairs of existing switchport
type: dict
- sample: {"admin_state": "up", "description": "None", "interface": "port-channel101", "mode": "layer2", "type": "portchannel"}
+ sample: {"admin_state": "up", "description": "None",
+ "interface": "port-channel101", "mode": "layer2",
+ "type": "portchannel", "ip_forward": "enable"}
end_state:
description: k/v pairs of switchport after module execution
returned: always
type: dict or null
- sample: {"admin_state": "down", "description": "None", "interface": "port-channel101", "mode": "layer2", "type": "portchannel"}
-state:
- description: state as sent in from the playbook
- returned: always
- type: string
- sample: "present"
+ sample: {"admin_state": "down", "description": "None",
+ "interface": "port-channel101", "mode": "layer2",
+ "type": "portchannel", "ip_forward": "enable"}
updates:
description: command list sent to the device
returned: always
@@ -117,31 +160,178 @@
returned: always
type: boolean
sample: true
-
'''
+import json
+
+# COMMON CODE FOR MIGRATION
+
+import ansible.module_utils.nxos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.shell import ShellError
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
def is_default_interface(interface, module):
"""Checks to see if interface exists and if it is a default config
-
Args:
interface (str): full name of interface, i.e. vlan10,
Ethernet1/1, loopback10
-
Returns:
True: if interface has default config
False: if it does not have a default config
DNE (str): if the interface does not exist - loopbacks, SVIs, etc.
-
"""
command = 'show run interface ' + interface
try:
body = execute_show_command(command, module,
command_type='cli_show_ascii')[0]
- # module.exit_json(abcd='asdasdfasdf', body=body, c=command)
except IndexError:
- body = []
+ body = ''
if body:
raw_list = body.split('\n')
@@ -157,44 +347,14 @@ def is_default_interface(interface, module):
return 'DNE'
-def get_available_features(feature, module):
- available_features = {}
- command = 'show feature'
- body = execute_show_command(command, module)
-
- try:
- body = body[0]['TABLE_cfcFeatureCtrlTable']['ROW_cfcFeatureCtrlTable']
- except (TypeError, IndexError):
- return available_features
-
- for each_feature in body:
- feature = each_feature['cfcFeatureCtrlName2']
- state = each_feature['cfcFeatureCtrlOpStatus2']
-
- if 'enabled' in state:
- state = 'enabled'
-
- if feature not in available_features.keys():
- available_features[feature] = state
- else:
- if (available_features[feature] == 'disabled' and
- state == 'enabled'):
- available_features[feature] = state
-
- return available_features
-
-
def get_interface_type(interface):
"""Gets the type of interface
-
Args:
interface (str): full name of interface, i.e. Ethernet1/1, loopback10,
port-channel20, vlan20
-
Returns:
type of interface: ethernet, svi, loopback, management, portchannel,
or unknown
-
"""
if interface.upper().startswith('ET'):
return 'ethernet'
@@ -208,20 +368,19 @@ def get_interface_type(interface):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
+ elif interface.upper().startswith('NV'):
+ return 'nve'
else:
return 'unknown'
def get_manual_interface_attributes(interface, module):
"""Gets admin state and description of a SVI interface. Hack due to API.
-
Args:
interface (str): full name of SVI interface, i.e. vlan10
-
Returns:
dictionary that has two k/v pairs: admin_state & description
if not an svi, returns None
-
"""
if get_interface_type(interface) == 'svi':
@@ -248,15 +407,12 @@ def get_manual_interface_attributes(interface, module):
def get_interface(intf, module):
"""Gets current config/state of interface
-
Args:
intf (string): full name of interface, i.e. Ethernet1/1, loopback10,
port-channel20, vlan20
-
Returns:
dictionary that has relevant config/state data about the given
interface based on the type of interface it is
-
"""
base_key_map = {
'interface': 'interface',
@@ -293,7 +449,6 @@ def get_interface(intf, module):
if body:
interface_table = body['TABLE_interface']['ROW_interface']
-
intf_type = get_interface_type(intf)
if intf_type in ['portchannel', 'ethernet']:
if not interface_table.get('eth_mode'):
@@ -315,6 +470,18 @@ def get_interface(intf, module):
'nxapibug'))
interface['description'] = str(attributes.get('description',
'nxapi_bug'))
+ command = 'show run interface ' + intf
+ body = execute_show_command(command, module,
+ command_type='cli_show_ascii')[0]
+ if 'ip forward' in body:
+ interface['ip_forward'] = 'enable'
+ else:
+ interface['ip_forward'] = 'disable'
+ if 'fabric forwarding mode anycast-gateway' in body:
+ interface['fabric_forwarding_anycast_gateway'] = True
+ else:
+ interface['fabric_forwarding_anycast_gateway'] = False
+
elif intf_type == 'loopback':
key_map.update(base_key_map)
key_map.pop('admin_state')
@@ -338,6 +505,13 @@ def get_interface(intf, module):
temp_dict['description'] = "None"
interface.update(temp_dict)
+ elif intf_type == 'nve':
+ key_map.update(base_key_map)
+ temp_dict = apply_key_map(key_map, interface_table)
+ if not temp_dict.get('description'):
+ temp_dict['description'] = "None"
+ interface.update(temp_dict)
+
interface['type'] = intf_type
return interface
@@ -350,18 +524,18 @@ def get_intf_args(interface):
if intf_type in ['ethernet', 'portchannel']:
arguments.extend(['mode'])
+ if intf_type == 'svi':
+ arguments.extend(['ip_forward', 'fabric_forwarding_anycast_gateway'])
return arguments
def get_interfaces_dict(module):
"""Gets all active interfaces on a given switch
-
Returns:
dictionary with interface type (ethernet,svi,loop,portchannel) as the
keys. Each value is a list of interfaces of given interface (key)
type.
-
"""
command = 'show interface status'
try:
@@ -375,12 +549,13 @@ def get_interfaces_dict(module):
'loopback': [],
'management': [],
'portchannel': [],
+ 'nve': [],
'unknown': []
}
interface_list = body.get('TABLE_interface')['ROW_interface']
- for i in interface_list:
- intf = i['interface']
+ for index in interface_list:
+ intf = index ['interface']
intf_type = get_interface_type(intf)
interfaces[intf_type].append(intf)
@@ -406,6 +581,8 @@ def _get_number(if_name):
if_type = 'loopback'
elif if_name.lower().startswith('po'):
if_type = 'port-channel'
+ elif if_name.lower().startswith('nv'):
+ if_type = 'nve'
else:
if_type = None
@@ -444,19 +621,15 @@ def apply_value_map(value_map, resource):
def get_interface_config_commands(interface, intf, existing):
"""Generates list of commands to configure on device
-
Args:
interface (str): k/v pairs in the form of a set that should
be configured on the device
intf (str): full name of interface, i.e. Ethernet1/1
-
Returns:
list: ordered list of commands to be sent to device
-
"""
commands = []
-
desc = interface.get('description')
if desc:
commands.append('description {0}'.format(desc))
@@ -474,6 +647,21 @@ def get_interface_config_commands(interface, intf, existing):
command = get_admin_state(interface, intf, admin_state)
commands.append(command)
+ ip_forward = interface.get('ip_forward')
+ if ip_forward:
+ if ip_forward == 'enable':
+ commands.append('ip forward')
+ else:
+ commands.append('no ip forward')
+
+ fabric_forwarding_anycast_gateway = interface.get(
+ 'fabric_forwarding_anycast_gateway')
+ if fabric_forwarding_anycast_gateway is not None:
+ if fabric_forwarding_anycast_gateway is True:
+ commands.append('fabric forwarding mode anycast-gateway')
+ elif fabric_forwarding_anycast_gateway is False:
+ commands.append('no fabric forwarding mode anycast-gateway')
+
if commands:
commands.insert(0, 'interface ' + intf)
@@ -498,7 +686,7 @@ def get_proposed(existing, normalized_interface, args):
# retrieves proper interface params from args (user defined params)
for param in allowed_params:
temp = args.get(param)
- if temp:
+ if temp is not None:
proposed[param] = temp
return proposed
@@ -516,7 +704,7 @@ def smart_existing(module, intf_type, normalized_interface):
if intf_type == 'ethernet':
module.fail_json(msg='Invalid Ethernet interface provided.',
interface=normalized_interface)
- elif intf_type in ['loopback', 'portchannel', 'svi']:
+ elif intf_type in ['loopback', 'portchannel', 'svi', 'nve']:
existing = {}
is_default = 'DNE'
return existing, is_default
@@ -529,6 +717,15 @@ def execute_config_command(commands, module):
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
def get_cli_body_ssh(command, response, module):
@@ -552,6 +749,11 @@ def get_cli_body_ssh(command, response, module):
def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
@@ -559,8 +761,21 @@ def execute_show(cmds, module, command_type=None):
response = module.execute(cmds)
except ShellError:
clie = get_exception()
- module.fail_json(msg='Error sending {0}'.format(command),
+ module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
return response
@@ -580,7 +795,10 @@ def execute_show_command(command, module, command_type='cli_show'):
def execute_modified_show_for_cli_text(command, module):
cmds = [command]
- response = execute_show(cmds, module)
+ if module.params['transport'] == 'cli':
+ response = execute_show(cmds, module)
+ else:
+ response = execute_show(cmds, module, command_type='cli_show_ascii')
body = response
return body
@@ -595,90 +813,124 @@ def flatten_list(command_lists):
return flat_command_list
+def get_interface_type_removed_cmds(interfaces):
+ commands = []
+
+ for interface in interfaces:
+ if interface != 'Vlan1':
+ commands.append('no interface {0}'.format(interface))
+
+ return commands
+
+
def main():
argument_spec = dict(
- interface=dict(required=True,),
+ interface=dict(required=False,),
admin_state=dict(default='up', choices=['up', 'down'], required=False),
description=dict(required=False, default=None),
mode=dict(choices=['layer2', 'layer3'], required=False),
+ interface_type=dict(required=False,
+ choices=['loopback', 'portchannel', 'svi', 'nve']),
+ ip_forward=dict(required=False, choices=['enable', 'disable']),
+ fabric_forwarding_anycast_gateway=dict(required=False, type='bool'),
state=dict(choices=['absent', 'present', 'default'],
- default='present', required=False)
+ default='present', required=False),
+ include_defaults=dict(default=True),
+ config=dict(),
+ save=dict(type='bool', default=False)
)
- module = get_module(argument_spec=argument_spec,
- supports_check_mode=True)
+ module = get_network_module(argument_spec=argument_spec,
+ mutually_exclusive=[['interface', 'interface_type']],
+ supports_check_mode=True)
- interface = module.params['interface'].lower()
+ interface = module.params['interface']
+ interface_type = module.params['interface_type']
admin_state = module.params['admin_state']
description = module.params['description']
mode = module.params['mode']
+ ip_forward = module.params['ip_forward']
+ fabric_forwarding_anycast_gateway = module.params['fabric_forwarding_anycast_gateway']
state = module.params['state']
- changed = False
-
- args = dict(interface=interface, admin_state=admin_state,
- description=description, mode=mode)
-
- intf_type = get_interface_type(interface)
-
- normalized_interface = normalize_interface(interface)
-
- if normalized_interface == 'Vlan1' and state == 'absent':
- module.fail_json(msg='ERROR: CANNOT REMOVE VLAN 1!')
-
- if intf_type == 'svi':
- feature = 'interface-vlan'
- available_features = get_available_features(feature, module)
- svi_state = available_features[feature]
- if svi_state == 'disabled':
+ if interface:
+ interface = interface.lower()
+ intf_type = get_interface_type(interface)
+ normalized_interface = normalize_interface(interface)
+
+ if normalized_interface == 'Vlan1' and state == 'absent':
+ module.fail_json(msg='ERROR: CANNOT REMOVE VLAN 1!')
+
+ if intf_type == 'nve':
+ if description or mode:
+ module.fail_json(msg='description and mode params are not '
+ 'supported in this module. Use '
+ 'nxos_vxlan_vtep instead.')
+ if ((ip_forward or fabric_forwarding_anycast_gateway) and
+ intf_type != 'svi'):
+ module.fail_json(msg='The ip_forward and '
+ 'fabric_forwarding_anycast_gateway features '
+ ' are only available for SVIs.')
+ args = dict(interface=interface, admin_state=admin_state,
+ description=description, mode=mode, ip_forward=ip_forward,
+ fabric_forwarding_anycast_gateway=fabric_forwarding_anycast_gateway)
+
+ if intf_type == 'unknown':
module.fail_json(
- msg='SVI (interface-vlan) feature needs to be enabled first',
- )
-
- if intf_type == 'unknown':
- module.fail_json(
- msg='unknown interface type found-1',
- interface=interface)
+ msg='unknown interface type found-1',
+ interface=interface)
- existing, is_default = smart_existing(module, intf_type, normalized_interface)
- proposed = get_proposed(existing, normalized_interface, args)
+ existing, is_default = smart_existing(module, intf_type, normalized_interface)
+ proposed = get_proposed(existing, normalized_interface, args)
+ else:
+ intf_type = normalized_interface = interface_type
+ proposed = dict(interface_type=interface_type)
- delta = dict()
+ changed = False
commands = []
-
- if state == 'absent':
- if intf_type in ['svi', 'loopback', 'portchannel']:
- if is_default != 'DNE':
- cmds = ['no interface {0}'.format(normalized_interface)]
+ if interface:
+ delta = dict()
+
+ if state == 'absent':
+ if intf_type in ['svi', 'loopback', 'portchannel', 'nve']:
+ if is_default != 'DNE':
+ cmds = ['no interface {0}'.format(normalized_interface)]
+ commands.append(cmds)
+ elif intf_type in ['ethernet']:
+ if is_default is False:
+ cmds = ['default interface {0}'.format(normalized_interface)]
+ commands.append(cmds)
+ elif state == 'present':
+ if not existing:
+ cmds = get_interface_config_commands(proposed,
+ normalized_interface,
+ existing)
commands.append(cmds)
- elif intf_type in ['ethernet']:
+ else:
+ delta = dict(set(proposed.iteritems()).difference(
+ existing.iteritems()))
+ if delta:
+ cmds = get_interface_config_commands(delta,
+ normalized_interface,
+ existing)
+ commands.append(cmds)
+ elif state == 'default':
if is_default is False:
cmds = ['default interface {0}'.format(normalized_interface)]
commands.append(cmds)
- elif state == 'present':
- if not existing:
- cmds = get_interface_config_commands(proposed,
- normalized_interface,
- existing)
- commands.append(cmds)
- else:
- delta = dict(set(proposed.iteritems()).difference(
- existing.iteritems()))
- if delta:
- cmds = get_interface_config_commands(delta,
- normalized_interface,
- existing)
- commands.append(cmds)
- elif state == 'default':
- if is_default is False:
- cmds = ['default interface {0}'.format(normalized_interface)]
- commands.append(cmds)
- elif is_default == 'DNE':
- module.exit_json(msg='interface you are trying to default does'
- ' not exist')
+ elif is_default == 'DNE':
+ module.exit_json(msg='interface you are trying to default does'
+ ' not exist')
+ elif interface_type:
+ if state == 'present':
+ module.fail_json(msg='The interface_type param can be used '
+ 'only with state absent.')
+
+ existing = get_interfaces_dict(module)[interface_type]
+ cmds = get_interface_type_removed_cmds(existing)
+ commands.append(cmds)
cmds = flatten_list(commands)
-
end_state = existing
if cmds:
@@ -686,35 +938,33 @@ def main():
module.exit_json(changed=True, commands=cmds)
else:
execute_config_command(cmds, module)
- if delta.get('mode'): # or delta.get('admin_state'):
- # if the mode changes from L2 to L3, the admin state
- # seems to change after the API call, so adding a second API
- # call to ensure it's in the desired state.
- admin_state = delta.get('admin_state') or admin_state
- c1 = 'interface {0}'.format(normalized_interface)
- c2 = get_admin_state(delta, normalized_interface, admin_state)
- cmds2 = [c1, c2]
- execute_config_command(cmds2, module)
- cmds.extend(cmds2)
changed = True
- end_state, is_default = smart_existing(module, intf_type,
- normalized_interface)
+ if module.params['interface']:
+ if delta.get('mode'): # or delta.get('admin_state'):
+ # if the mode changes from L2 to L3, the admin state
+ # seems to change after the API call, so adding a second API
+ # call to ensure it's in the desired state.
+ admin_state = delta.get('admin_state') or admin_state
+ c1 = 'interface {0}'.format(normalized_interface)
+ c2 = get_admin_state(delta, normalized_interface, admin_state)
+ cmds2 = [c1, c2]
+ execute_config_command(cmds2, module)
+ cmds.extend(cmds2)
+ end_state, is_default = smart_existing(module, intf_type,
+ normalized_interface)
+ else:
+ end_state = get_interfaces_dict(module)[interface_type]
+ cmds = [cmd for cmd in cmds if cmd != 'configure']
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
- results['state'] = state
results['updates'] = cmds
results['changed'] = changed
module.exit_json(**results)
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.netcfg import *
-from ansible.module_utils.nxos import *
if __name__ == '__main__':
- main()
+ main()
\ No newline at end of file
diff --git a/network/nxos/nxos_interface_ospf.py b/network/nxos/nxos_interface_ospf.py
new file mode 100644
index 00000000000..9f63bf57a04
--- /dev/null
+++ b/network/nxos/nxos_interface_ospf.py
@@ -0,0 +1,681 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_interface_ospf
+version_added: "2.2"
+short_description: Manages configuration of an OSPF interface instance.
+description:
+ - Manages configuration of an OSPF interface instance.
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - Default, where supported, restores params default value.
+ - To remove an existing authentication configuration you should use
+ C(message_digest_key_id=default) plus all other options matching their
+ existing values.
+ - C(state=absent) removes the whole OSPF interface configuration.
+options:
+ interface:
+ description:
+ - Name of this cisco_interface resource. Valid value is a string.
+ required: true
+ ospf:
+ description:
+ - Name of the ospf instance.
+ required: true
+ area:
+ description:
+ - Ospf area associated with this cisco_interface_ospf instance.
+ Valid values are a string, formatted as an IP address
+ (i.e. "0.0.0.0") or as an integer.
+ required: true
+ cost:
+ description:
+ - The cost associated with this cisco_interface_ospf instance.
+ required: false
+ default: null
+ hello_interval:
+ description:
+ - Time between sending successive hello packets.
+ Valid values are an integer or the keyword 'default'.
+ required: false
+ default: null
+ dead_interval:
+ description:
+ - Time interval an ospf neighbor waits for a hello
+ packet before tearing down adjacencies. Valid values are an
+ integer or the keyword 'default'.
+ required: false
+ default: null
+ passive_interface:
+ description:
+ - Setting to true will prevent this interface from receiving
+ HELLO packets. Valid values are 'true' and 'false'.
+ required: false
+ choices: ['true','false']
+ default: null
+ message_digest:
+ description:
+ - Enables or disables the usage of message digest authentication.
+ Valid values are 'true' and 'false'.
+ required: false
+ choices: ['true','false']
+ default: null
+ message_digest_key_id:
+ description:
+ - Md5 authentication key-id associated with the ospf instance.
+ If this is present, message_digest_encryption_type,
+ message_digest_algorithm_type and message_digest_password are
+ mandatory. Valid value is an integer and 'default'.
+ required: false
+ default: null
+ message_digest_algorithm_type:
+ description:
+ - Algorithm used for authentication among neighboring routers
+ within an area. Valid values is 'md5'.
+ required: false
+ choices: ['md5']
+ default: null
+ message_digest_encryption_type:
+ description:
+ - Specifies the scheme used for encrypting message_digest_password.
+ Valid values are '3des' or 'cisco_type_7' encryption.
+ required: false
+ choices: ['cisco_type_7','3des']
+ default: null
+ message_digest_password:
+ description:
+ - Specifies the message_digest password. Valid value is a string.
+ required: false
+ default: null
+ state:
+ description:
+ - Determines whether the config should be present or not
+ on the device.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+EXAMPLES = '''
+- nxos_interface_ospf:
+ interface: ethernet1/32
+ ospf: 1
+ area: 1
+ cost: default
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"area": "1", "interface": "ethernet1/32", "ospf": "1"}
+existing:
+ description: k/v pairs of existing OSPF configuration
+ returned: verbose mode
+ type: dict
+ sample: {"area": "", "cost": "", "dead_interval": "",
+ "hello_interval": "", "interface": "ethernet1/32",
+ "message_digest": false, "message_digest_algorithm_type": "",
+ "message_digest_encryption_type": "",
+ "message_digest_key_id": "", "message_digest_password": "",
+ "ospf": "", "passive_interface": false}
+end_state:
+ description: k/v pairs of OSPF configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"area": "0.0.0.1", "cost": "", "dead_interval": "",
+ "hello_interval": "", "interface": "ethernet1/32",
+ "message_digest": false, "message_digest_algorithm_type": "",
+ "message_digest_encryption_type": "", "message_digest_key_id": "",
+ "message_digest_password": "", "ospf": "1",
+ "passive_interface": false}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["interface Ethernet1/32", "ip router ospf 1 area 0.0.0.1"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+# COMMON CODE FOR MIGRATION
+import re
+
+import ansible.module_utils.nxos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.shell import ShellError
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+BOOL_PARAMS = [
+ 'passive_interface',
+ 'message_digest'
+]
+PARAM_TO_COMMAND_KEYMAP = {
+ 'cost': 'ip ospf cost',
+ 'ospf': 'ip router ospf',
+ 'area': 'ip router ospf',
+ 'hello_interval': 'ip ospf hello-interval',
+ 'dead_interval': 'ip ospf dead-interval',
+ 'passive_interface': 'ip ospf passive-interface',
+ 'message_digest': 'ip ospf authentication message-digest',
+ 'message_digest_key_id': 'ip ospf message-digest-key',
+ 'message_digest_algorithm_type': 'ip ospf message-digest-key options',
+ 'message_digest_encryption_type': 'ip ospf message-digest-key options',
+ 'message_digest_password': 'ip ospf message-digest-key options',
+}
+PARAM_TO_DEFAULT_KEYMAP = {
+}
+
+
+def invoke(name, *args, **kwargs):
+ func = globals().get(name)
+ if func:
+ return func(*args, **kwargs)
+
+
+def get_custom_value(arg, config, module):
+ value = ''
+ if arg == 'ospf':
+ REGEX = re.compile(r'(?:ip router ospf\s)(?P.*)$', re.M)
+ value = ''
+ if 'ip router ospf' in config:
+ parsed = REGEX.search(config).group('value').split()
+ value = parsed[0]
+
+ elif arg == 'area':
+ REGEX = re.compile(r'(?:ip router ospf\s)(?P.*)$', re.M)
+ value = ''
+ if 'ip router ospf' in config:
+ parsed = REGEX.search(config).group('value').split()
+ value = parsed[2]
+
+ elif arg.startswith('message_digest_'):
+ REGEX = re.compile(r'(?:ip ospf message-digest-key\s)(?P.*)$', re.M)
+ value = ''
+ if 'ip ospf message-digest-key' in config:
+ value_list = REGEX.search(config).group('value').split()
+ if arg == 'message_digest_key_id':
+ value = value_list[0]
+ elif arg == 'message_digest_algorithm_type':
+ value = value_list[1]
+ elif arg == 'message_digest_encryption_type':
+ value = value_list[2]
+ if value == '3':
+ value = '3des'
+ elif value == '7':
+ value = 'cisco_type_7'
+ elif arg == 'message_digest_password':
+ value = value_list[3]
+
+ elif arg == 'passive_interface':
+ REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
+ NO_REGEX = re.compile(r'\s+no\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
+ value = False
+ try:
+ if NO_REGEX.search(config):
+ value = False
+ elif REGEX.search(config):
+ value = True
+ except TypeError:
+ value = False
+
+ return value
+
+
+def get_value(arg, config, module):
+ custom = [
+ 'ospf',
+ 'area',
+ 'message_digest_key_id',
+ 'message_digest_algorithm_type',
+ 'message_digest_encryption_type',
+ 'message_digest_password',
+ 'passive_interface'
+ ]
+
+ if arg in custom:
+ value = get_custom_value(arg, config, module)
+ elif arg in BOOL_PARAMS:
+ REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
+ value = False
+ try:
+ if REGEX.search(config):
+ value = True
+ except TypeError:
+ value = False
+ else:
+ REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
+ value = ''
+ if PARAM_TO_COMMAND_KEYMAP[arg] in config:
+ value = REGEX.search(config).group('value')
+ return value
+
+
+def get_existing(module, args):
+    """Collect the current OSPF settings of the target interface.
+
+    Returns a dict keyed by arg name; empty when the interface section has
+    no OSPF configuration at all.
+    """
+    existing = {}
+    netcfg = get_config(module)
+    parents = ['interface {0}'.format(module.params['interface'].capitalize())]
+    config = netcfg.get_section(parents)
+    # only parse further when OSPF appears somewhere in the section
+    if 'ospf' in config:
+        for arg in args:
+            if arg not in ['interface']:
+                existing[arg] = get_value(arg, config, module)
+        existing['interface'] = module.params['interface']
+    return existing
+
+
+def apply_key_map(key_map, table):
+ new_dict = {}
+ for key, value in table.items():
+ new_key = key_map.get(key)
+ if new_key:
+ value = table.get(key)
+ if value:
+ new_dict[new_key] = value
+ else:
+ new_dict[new_key] = value
+ return new_dict
+
+
+def get_default_commands(existing, proposed, existing_commands, key, module):
+    """Build 'no ...' commands that reset *key* back to its default state."""
+    commands = list()
+    existing_value = existing_commands.get(key)
+    if key.startswith('ip ospf message-digest-key'):
+        check = False
+        # remove the key only if at least one auth field matches what is
+        # currently configured on the device
+        for param in ['message_digest_encryption_type',
+                      'message_digest_algorithm_type',
+                      'message_digest_password']:
+            if existing[param] == proposed[param]:
+                check = True
+        if check:
+            if existing['message_digest_encryption_type'] == '3des':
+                encryption_type = '3'
+            elif existing['message_digest_encryption_type'] == 'cisco_type_7':
+                encryption_type = '7'
+            # NOTE(review): encryption_type is unbound if the existing type is
+            # neither '3des' nor 'cisco_type_7' -- confirm those are the only
+            # values the parser can produce
+            command = 'no {0} {1} {2} {3} {4}'.format(
+                key,
+                existing['message_digest_key_id'],
+                existing['message_digest_algorithm_type'],
+                encryption_type,
+                existing['message_digest_password'])
+            commands.append(command)
+    else:
+        commands.append('no {0} {1}'.format(key, existing_value))
+    return commands
+
+
+def get_custom_command(existing_cmd, proposed, key, module):
+    """Build the multi-field commands: 'ip router ospf' and message-digest-key."""
+    commands = list()
+
+    if key == 'ip router ospf':
+        command = '{0} {1} area {2}'.format(key, proposed['ospf'],
+                                            proposed['area'])
+        # skip if an identical command is already queued
+        if command not in existing_cmd:
+            commands.append(command)
+
+    elif key.startswith('ip ospf message-digest-key'):
+        if (proposed['message_digest_key_id'] != 'default' and
+                'options' not in key):
+            if proposed['message_digest_encryption_type'] == '3des':
+                encryption_type = '3'
+            elif proposed['message_digest_encryption_type'] == 'cisco_type_7':
+                encryption_type = '7'
+            # NOTE(review): encryption_type is unbound for any other proposed
+            # type -- the argument_spec restricts choices, confirm that holds
+            command = '{0} {1} {2} {3} {4}'.format(
+                key,
+                proposed['message_digest_key_id'],
+                proposed['message_digest_algorithm_type'],
+                encryption_type,
+                proposed['message_digest_password'])
+            commands.append(command)
+    return commands
+
+
+def state_present(module, existing, proposed, candidate):
+    """Queue the config commands needed to reach the proposed state."""
+    commands = list()
+    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
+    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
+
+    for key, value in proposed_commands.iteritems():
+        if value is True:
+            commands.append(key)
+        elif value is False:
+            commands.append('no {0}'.format(key))
+        elif value == 'default':
+            # 'default' means: undo whatever is currently configured
+            if existing_commands.get(key):
+                commands.extend(get_default_commands(existing, proposed,
+                                                     existing_commands, key,
+                                                     module))
+        else:
+            if (key == 'ip router ospf' or
+                    key.startswith('ip ospf message-digest-key')):
+                commands.extend(get_custom_command(commands, proposed,
+                                                   key, module))
+            else:
+                command = '{0} {1}'.format(key, value.lower())
+                commands.append(command)
+
+    if commands:
+        parents = ['interface {0}'.format(module.params['interface'].capitalize())]
+        candidate.add(commands, parents=parents)
+
+
+def state_absent(module, existing, proposed, candidate):
+    """Queue the commands that remove the existing OSPF interface config."""
+    commands = []
+    parents = ['interface {0}'.format(module.params['interface'].capitalize())]
+    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
+
+    for key, value in existing_commands.iteritems():
+        if value:
+            if key.startswith('ip ospf message-digest-key'):
+                # the full key line must be repeated to remove it
+                if 'options' not in key:
+                    if existing['message_digest_encryption_type'] == '3des':
+                        encryption_type = '3'
+                    elif existing['message_digest_encryption_type'] == 'cisco_type_7':
+                        encryption_type = '7'
+                    command = 'no {0} {1} {2} {3} {4}'.format(
+                        key,
+                        existing['message_digest_key_id'],
+                        existing['message_digest_algorithm_type'],
+                        encryption_type,
+                        existing['message_digest_password'])
+                    commands.append(command)
+            elif key in ['ip ospf authentication message-digest',
+                         'ip ospf passive-interface']:
+                if value:
+                    commands.append('no {0}'.format(key))
+            elif key == 'ip router ospf':
+                command = 'no {0} {1} area {2}'.format(key, proposed['ospf'],
+                                                       proposed['area'])
+                if command not in commands:
+                    commands.append(command)
+            else:
+                existing_value = existing_commands.get(key)
+                commands.append('no {0} {1}'.format(key, existing_value))
+
+    candidate.add(commands, parents=parents)
+
+
+def normalize_area(area, module):
+ try:
+ area = int(area)
+ area = '0.0.0.{0}'.format(area)
+ except ValueError:
+ splitted_area = area.split('.')
+ if len(splitted_area) != 4:
+ module.fail_json(msg='Incorrect Area ID format', area=area)
+ return area
+
+
+def main():
+    """Module entry point: gather state, compute the diff, apply config."""
+    argument_spec = dict(
+        interface=dict(required=True, type='str'),
+        ospf=dict(required=True, type='str'),
+        area=dict(required=True, type='str'),
+        cost=dict(required=False, type='str'),
+        hello_interval=dict(required=False, type='str'),
+        dead_interval=dict(required=False, type='str'),
+        passive_interface=dict(required=False, type='bool'),
+        message_digest=dict(required=False, type='bool'),
+        message_digest_key_id=dict(required=False, type='str'),
+        message_digest_algorithm_type=dict(required=False, type='str',
+                                           choices=['md5']),
+        message_digest_encryption_type=dict(required=False, type='str',
+                                            choices=['cisco_type_7','3des']),
+        message_digest_password=dict(required=False, type='str'),
+        state=dict(choices=['present', 'absent'], default='present',
+                   required=False),
+        include_defaults=dict(default=True),
+        config=dict(),
+        save=dict(type='bool', default=False)
+    )
+    module = get_network_module(argument_spec=argument_spec,
+                                required_together=[['message_digest_key_id',
+                                                    'message_digest_algorithm_type',
+                                                    'message_digest_encryption_type',
+                                                    'message_digest_password']],
+                                supports_check_mode=True)
+
+    # 'default' is only meaningful on the key id; reject it on the other
+    # message-digest params with guidance
+    for param in ['message_digest_encryption_type',
+                  'message_digest_algorithm_type',
+                  'message_digest_password']:
+        if module.params[param] == 'default':
+            module.exit_json(msg='Use message_digest_key_id=default to remove'
+                                 ' an existing authentication configuration')
+
+    state = module.params['state']
+    args = [
+        'interface',
+        'ospf',
+        'area',
+        'cost',
+        'hello_interval',
+        'dead_interval',
+        'passive_interface',
+        'message_digest',
+        'message_digest_key_id',
+        'message_digest_algorithm_type',
+        'message_digest_encryption_type',
+        'message_digest_password'
+    ]
+
+    existing = invoke('get_existing', module, args)
+    end_state = existing
+    proposed_args = dict((k, v) for k, v in module.params.iteritems()
+                         if v is not None and k in args)
+
+    proposed = {}
+    # normalize string booleans / 'default' sentinels into real values
+    for key, value in proposed_args.iteritems():
+        if key != 'interface':
+            if str(value).lower() == 'true':
+                value = True
+            elif str(value).lower() == 'false':
+                value = False
+            elif str(value).lower() == 'default':
+                value = PARAM_TO_DEFAULT_KEYMAP.get(key)
+                if value is None:
+                    value = 'default'
+        if existing.get(key) or (not existing.get(key) and value):
+            proposed[key] = value
+
+    proposed['area'] = normalize_area(proposed['area'], module)
+    result = {}
+    # 'absent' is only applied when the configured process/area match the
+    # requested ones; otherwise nothing is removed
+    if (state == 'present' or (state == 'absent' and
+            existing.get('ospf') == proposed['ospf'] and
+            existing.get('area') == proposed['area'])):
+
+        candidate = CustomNetworkConfig(indent=3)
+        invoke('state_%s' % state, module, existing, proposed, candidate)
+
+        try:
+            response = load_config(module, candidate)
+            result.update(response)
+        except ShellError:
+            exc = get_exception()
+            module.fail_json(msg=str(exc))
+    else:
+        result['updates'] = []
+
+    result['connected'] = module.connected
+    if module._verbosity > 0:
+        end_state = invoke('get_existing', module, args)
+        result['end_state'] = end_state
+        result['existing'] = existing
+        result['proposed'] = proposed_args
+
+    module.exit_json(**result)
diff --git a/network/nxos/nxos_ip_interface.py b/network/nxos/nxos_ip_interface.py
index 39dd7226b47..b860ea20b6f 100644
--- a/network/nxos/nxos_ip_interface.py
+++ b/network/nxos/nxos_ip_interface.py
@@ -16,24 +16,27 @@
 # along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: nxos_ip_interface
version_added: "2.1"
-short_description: Manages L3 attributes for IPv4 and IPv6 interfaces
+short_description: Manages L3 attributes for IPv4 and IPv6 interfaces.
description:
- - Manages Layer 3 attributes for IPv4 and IPv6 interfaces
+ - Manages Layer 3 attributes for IPv4 and IPv6 interfaces.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- - Interface must already be a L3 port when using this module
- - Logical interfaces (po, loop, svi) must be created first
- - I(mask) must be inserted in decimal format (i.e. 24) for
+ - Interface must already be a L3 port when using this module.
+ - Logical interfaces (po, loop, svi) must be created first.
+ - C(mask) must be inserted in decimal format (i.e. 24) for
both IPv6 and IPv4.
- A single interface can have multiple IPv6 configured.
-
options:
interface:
description:
@@ -58,10 +61,23 @@
'''
EXAMPLES = '''
-# ensure ipv4 address is configured on Ethernet1/32
-- nxos_ip_interface: interface=Ethernet1/32 transport=nxapi version=v4 state=present addr=20.20.20.20 mask=24
-# ensure ipv6 address is configured on Ethernet1/31
-- nxos_ip_interface: interface=Ethernet1/31 transport=cli version=v6 state=present addr=2001::db8:800:200c:cccb mask=64
+- name: Ensure ipv4 address is configured on Ethernet1/32
+ nxos_ip_interface:
+ interface: Ethernet1/32
+ transport: nxapi
+ version: v4
+ state: present
+ addr: 20.20.20.20
+ mask: 24
+
+- name: Ensure ipv6 address is configured on Ethernet1/31
+ nxos_ip_interface:
+ interface: Ethernet1/31
+ transport: cli
+ version: v6
+ state: present
+ addr: '2001::db8:800:200c:cccb'
+ mask: 64
'''
RETURN = '''
@@ -83,11 +99,6 @@
sample: {"addresses": [{"addr": "20.20.20.20", "mask": 24}],
"interface": "ethernet1/32", "prefix": "20.20.20.0",
"type": "ethernet", "vrf": "default"}
-state:
- description: state as sent in from the playbook
- returned: always
- type: string
- sample: "present"
updates:
description: commands sent to the device
returned: always
@@ -100,6 +111,162 @@
sample: true
'''
+import json
+import collections
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+    """NetworkConfig extended with section expansion and hierarchy-aware add."""
+
+    def expand_section(self, configobj, S=None):
+        # depth-first collection of a config object plus all of its children
+        if S is None:
+            S = list()
+        S.append(configobj)
+        for child in configobj.children:
+            if child in S:
+                continue
+            self.expand_section(child, S)
+        return S
+
+    def get_object(self, path):
+        # find the item whose text and full ancestry match *path* exactly
+        for item in self.items:
+            if item.text == path[-1]:
+                parents = [p.text for p in item.parents]
+                if parents == path[:-1]:
+                    return item
+
+    def to_block(self, section):
+        # join the raw lines of a section into one config blob
+        return '\n'.join([item.raw for item in section])
+
+    def get_section(self, path):
+        try:
+            section = self.get_section_objects(path)
+            return self.to_block(section)
+        except ValueError:
+            # NOTE(review): returns a list on miss but a string on hit --
+            # callers test membership with 'in', which works for both
+            return list()
+
+    def get_section_objects(self, path):
+        if not isinstance(path, list):
+            path = [path]
+        obj = self.get_object(path)
+        if not obj:
+            raise ValueError('path does not exist in config')
+        return self.expand_section(obj)
+
+
+    def add(self, lines, parents=None):
+        """Adds one or lines of configuration
+        """
+
+        ancestors = list()
+        offset = 0
+        obj = None
+
+        ## global config command
+        if not parents:
+            for line in to_list(lines):
+                item = ConfigLine(line)
+                item.raw = line
+                if item not in self.items:
+                    self.items.append(item)
+
+        else:
+            for index, p in enumerate(parents):
+                try:
+                    i = index + 1
+                    obj = self.get_section_objects(parents[:i])[0]
+                    ancestors.append(obj)
+
+                except ValueError:
+                    # add parent to config
+                    offset = index * self.indent
+                    obj = ConfigLine(p)
+                    obj.raw = p.rjust(len(p) + offset)
+                    if ancestors:
+                        obj.parents = list(ancestors)
+                        ancestors[-1].children.append(obj)
+                    self.items.append(obj)
+                    ancestors.append(obj)
+
+            # add child objects
+            for line in to_list(lines):
+                # check if child already exists
+                for child in ancestors[-1].children:
+                    if child.text == line:
+                        break
+                else:
+                    offset = len(parents) * self.indent
+                    item = ConfigLine(line)
+                    item.raw = line.rjust(len(line) + offset)
+                    item.parents = ancestors
+                    ancestors[-1].children.append(item)
+                    self.items.append(item)
+
+
+def get_network_module(**kwargs):
+    """Build the module via whichever nxos entry point imported successfully."""
+    try:
+        return get_module(**kwargs)
+    except NameError:
+        # get_module failed to import above; use the newer NetworkModule
+        return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+    """Return the device running-config wrapped in CustomNetworkConfig."""
+    config = module.params['config']
+    if not config:
+        try:
+            config = module.get_config()
+        except AttributeError:
+            # newer module objects expose config.get_config() instead
+            defaults = module.params['include_defaults']
+            config = module.config.get_config(include_defaults=defaults)
+    return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+    """Diff *candidate* against the device config and push only the delta."""
+    config = get_config(module)
+
+    commands = candidate.difference(config)
+    commands = [str(c).strip() for c in commands]
+
+    save_config = module.params['save']
+
+    result = dict(changed=False)
+
+    if commands:
+        if not module.check_mode:
+            try:
+                module.configure(commands)
+            except AttributeError:
+                # fall back to the alternate connection API
+                module.config(commands)
+
+            if save_config:
+                try:
+                    module.config.save_config()
+                except AttributeError:
+                    module.execute(['copy running-config startup-config'])
+
+        result['changed'] = True
+        result['updates'] = commands
+
+    return result
+# END OF COMMON CODE
def execute_config_command(commands, module):
try:
@@ -108,6 +275,15 @@ def execute_config_command(commands, module):
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
def get_cli_body_ssh(command, response, module):
@@ -120,7 +296,7 @@ def get_cli_body_ssh(command, response, module):
"""
if 'xml' in response[0]:
body = []
- elif '^' in response[0] or 'show run' in response[0]:
+ elif '^' in response[0] or 'show run' in response[0] or response[0] == '\n':
body = response
else:
try:
@@ -132,6 +308,11 @@ def get_cli_body_ssh(command, response, module):
def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
@@ -141,6 +322,19 @@ def execute_show(cmds, module, command_type=None):
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
return response
@@ -383,7 +577,7 @@ def get_config_ip_commands(delta, interface, existing, version):
# loop used in the situation that just an IP address or just a
# mask is changing, not both.
for each in ['addr', 'mask']:
- if each not in delta.keys():
+ if each not in delta:
delta[each] = existing[each]
if version == 'v4':
@@ -442,9 +636,12 @@ def main():
mask=dict(type='str', required=False),
state=dict(required=False, default='present',
choices=['present', 'absent']),
+ include_defaults=dict(default=True),
+ config=dict(),
+ save=dict(type='bool', default=False)
)
- module = get_module(argument_spec=argument_spec,
- supports_check_mode=True)
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
addr = module.params['addr']
version = module.params['version']
@@ -509,6 +706,8 @@ def main():
changed = True
end_state, address_list = get_ip_interface(interface, version,
module)
+ if 'configure' in cmds:
+ cmds.pop(0)
results = {}
results['proposed'] = proposed
@@ -516,15 +715,9 @@ def main():
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
- results['state'] = state
module.exit_json(**results)
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.netcfg import *
-from ansible.module_utils.nxos import *
if __name__ == '__main__':
main()
diff --git a/network/nxos/nxos_mtu.py b/network/nxos/nxos_mtu.py
new file mode 100644
index 00000000000..48a92c2f941
--- /dev/null
+++ b/network/nxos/nxos_mtu.py
@@ -0,0 +1,598 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_mtu
+version_added: "2.2"
+short_description: Manages MTU settings on Nexus switch.
+description:
+ - Manages MTU settings on Nexus switch.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+notes:
+ - Either C(sysmtu) param is required or C(interface) AND C(mtu) params are req'd.
+ - C(state=absent) unconfigures a given MTU if that value is currently present.
+options:
+ interface:
+ description:
+ - Full name of interface, i.e. Ethernet1/1.
+ required: false
+ default: null
+ mtu:
+ description:
+ - MTU for a specific interface.
+ required: false
+ default: null
+ sysmtu:
+ description:
+ - System jumbo MTU.
+ required: false
+ default: null
+ state:
+ description:
+ - Specify desired state of the resource.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+# Ensure system mtu is 9216
+- nxos_mtu:
+ sysmtu: 9216
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# Config mtu on Eth1/1 (routed interface)
+- nxos_mtu:
+ interface: Ethernet1/1
+ mtu: 1600
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# Config mtu on Eth1/3 (switched interface)
+- nxos_mtu:
+ interface: Ethernet1/3
+ mtu: 9216
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# Unconfigure mtu on a given interface
+- nxos_mtu:
+ interface: Ethernet1/3
+ mtu: 9216
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ state: absent
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"mtu": "1700"}
+existing:
+ description:
+ - k/v pairs of existing mtu/sysmtu on the interface/system
+ type: dict
+ sample: {"mtu": "1600", "sysmtu": "9216"}
+end_state:
+ description: k/v pairs of mtu/sysmtu values after module execution
+ returned: always
+ type: dict
+    sample: {"mtu": "1700", "sysmtu": "9216"}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["interface vlan10", "mtu 1700"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+    """NetworkConfig subclass adding section lookup and indented add()."""
+
+    def expand_section(self, configobj, S=None):
+        # recursively gather configobj and every descendant into S
+        if S is None:
+            S = list()
+        S.append(configobj)
+        for child in configobj.children:
+            if child in S:
+                continue
+            self.expand_section(child, S)
+        return S
+
+    def get_object(self, path):
+        # match on both the line text and its parent chain
+        for item in self.items:
+            if item.text == path[-1]:
+                parents = [p.text for p in item.parents]
+                if parents == path[:-1]:
+                    return item
+
+    def to_block(self, section):
+        return '\n'.join([item.raw for item in section])
+
+    def get_section(self, path):
+        try:
+            section = self.get_section_objects(path)
+            return self.to_block(section)
+        except ValueError:
+            # NOTE(review): string on hit, list on miss -- callers rely on
+            # 'in' membership which tolerates both
+            return list()
+
+    def get_section_objects(self, path):
+        if not isinstance(path, list):
+            path = [path]
+        obj = self.get_object(path)
+        if not obj:
+            raise ValueError('path does not exist in config')
+        return self.expand_section(obj)
+
+
+    def add(self, lines, parents=None):
+        """Adds one or lines of configuration
+        """
+
+        ancestors = list()
+        offset = 0
+        obj = None
+
+        ## global config command
+        if not parents:
+            for line in to_list(lines):
+                item = ConfigLine(line)
+                item.raw = line
+                if item not in self.items:
+                    self.items.append(item)
+
+        else:
+            for index, p in enumerate(parents):
+                try:
+                    i = index + 1
+                    obj = self.get_section_objects(parents[:i])[0]
+                    ancestors.append(obj)
+
+                except ValueError:
+                    # add parent to config
+                    offset = index * self.indent
+                    obj = ConfigLine(p)
+                    obj.raw = p.rjust(len(p) + offset)
+                    if ancestors:
+                        obj.parents = list(ancestors)
+                        ancestors[-1].children.append(obj)
+                    self.items.append(obj)
+                    ancestors.append(obj)
+
+            # add child objects
+            for line in to_list(lines):
+                # check if child already exists
+                for child in ancestors[-1].children:
+                    if child.text == line:
+                        break
+                else:
+                    offset = len(parents) * self.indent
+                    item = ConfigLine(line)
+                    item.raw = line.rjust(len(line) + offset)
+                    item.parents = ancestors
+                    ancestors[-1].children.append(item)
+                    self.items.append(item)
+
+
+def get_network_module(**kwargs):
+    """Instantiate the module via the importable nxos entry point."""
+    try:
+        return get_module(**kwargs)
+    except NameError:
+        # get_module was not importable; NetworkModule is the replacement
+        return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+    """Fetch the running-config and wrap it in CustomNetworkConfig."""
+    config = module.params['config']
+    if not config:
+        try:
+            config = module.get_config()
+        except AttributeError:
+            # alternate API: config lives on module.config
+            defaults = module.params['include_defaults']
+            config = module.config.get_config(include_defaults=defaults)
+    return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+    """Push the diff between *candidate* and the device config; honor save."""
+    config = get_config(module)
+
+    commands = candidate.difference(config)
+    commands = [str(c).strip() for c in commands]
+
+    save_config = module.params['save']
+
+    result = dict(changed=False)
+
+    if commands:
+        if not module.check_mode:
+            try:
+                module.configure(commands)
+            except AttributeError:
+                # alternate connection API spelling
+                module.config(commands)
+
+            if save_config:
+                try:
+                    module.config.save_config()
+                except AttributeError:
+                    module.execute(['copy running-config startup-config'])
+
+        result['changed'] = True
+        result['updates'] = commands
+
+    return result
+# END OF COMMON CODE
+
+
+def execute_config_command(commands, module):
+    """Send config *commands*, falling back to the low-level cli shell API."""
+    try:
+        module.configure(commands)
+    except ShellError:
+        clie = get_exception()
+        module.fail_json(msg='Error sending CLI commands',
+                         error=str(clie), commands=commands)
+    except AttributeError:
+        # no .configure() on this module object: use module.cli, which
+        # needs an explicit leading 'configure' command
+        try:
+            commands.insert(0, 'configure')
+            module.cli.add_commands(commands, output='config')
+            module.cli.run_commands()
+        except ShellError:
+            clie = get_exception()
+            module.fail_json(msg='Error sending CLI commands',
+                             error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh(command, response, module):
+    """Get response for when transport=cli. This is kind of a hack and mainly
+    needed because these modules were originally written for NX-API. And
+    not every command supports "| json" when using cli/ssh. As such, we assume
+    if | json returns an XML string, it is a valid command, but that the
+    resource doesn't exist yet. Instead, the output will be a raw string
+    when issuing commands containing 'show run'.
+    """
+    # XML or a bare newline means the command ran but produced no JSON body
+    if 'xml' in response[0] or response[0] == '\n':
+        body = []
+    elif 'show run' in command:
+        body = response
+    else:
+        try:
+            body = [json.loads(response[0])]
+        except ValueError:
+            module.fail_json(msg='Command does not support JSON output',
+                             command=command)
+    return body
+
+
+def execute_show(cmds, module, command_type=None):
+    """Run show *cmds*, falling back to the cli shell API on AttributeError."""
+    # maps the legacy NX-API command_type to the cli 'output' keyword
+    command_type_map = {
+        'cli_show': 'json',
+        'cli_show_ascii': 'text'
+    }
+
+    try:
+        if command_type:
+            response = module.execute(cmds, command_type=command_type)
+        else:
+            response = module.execute(cmds)
+    except ShellError:
+        clie = get_exception()
+        module.fail_json(msg='Error sending {0}'.format(cmds),
+                         error=str(clie))
+    except AttributeError:
+        # no .execute() on this module object: translate and use module.cli
+        try:
+            if command_type:
+                command_type = command_type_map.get(command_type)
+                module.cli.add_commands(cmds, output=command_type)
+                response = module.cli.run_commands()
+            else:
+                module.cli.add_commands(cmds, raw=True)
+                response = module.cli.run_commands()
+        except ShellError:
+            clie = get_exception()
+            module.fail_json(msg='Error sending {0}'.format(cmds),
+                             error=str(clie))
+    return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+    """Dispatch *command* via cli (appending '| json') or nxapi transport."""
+    if module.params['transport'] == 'cli':
+        if 'show run' not in command:
+            command += ' | json'
+        cmds = [command]
+        response = execute_show(cmds, module)
+        body = get_cli_body_ssh(command, response, module)
+    elif module.params['transport'] == 'nxapi':
+        cmds = [command]
+        body = execute_show(cmds, module, command_type=command_type)
+
+    return body
+
+
+def flatten_list(command_lists):
+ flat_command_list = []
+ for command in command_lists:
+ if isinstance(command, list):
+ flat_command_list.extend(command)
+ else:
+ flat_command_list.append(command)
+ return flat_command_list
+
+
+def get_mtu(interface, module):
+    """Return {'mtu': ..., 'sysmtu': ...} for *interface*; {} on parse failure."""
+    command = 'show interface {0}'.format(interface)
+    mtu = {}
+
+    body = execute_show_command(command, module)
+
+    try:
+        mtu_table = body[0]['TABLE_interface']['ROW_interface']
+        # SVIs report 'svi_mtu' instead of 'eth_mtu'
+        mtu['mtu'] = str(
+            mtu_table.get('eth_mtu',
+                          mtu_table.get('svi_mtu', 'unreadable_via_api')))
+        mtu['sysmtu'] = get_system_mtu(module)['sysmtu']
+    except KeyError:
+        mtu = {}
+
+    return mtu
+
+
+def get_system_mtu(module):
+ command = 'show run all | inc jumbomtu'
+ sysmtu = ''
+
+ body = execute_show_command(command, module, command_type='cli_show_ascii')
+
+ if body:
+ sysmtu = str(body[0].split(' ')[-1])
+ try:
+ sysmtu = int(sysmtu)
+ except:
+ sysmtu = ""
+
+ return dict(sysmtu=str(sysmtu))
+
+
+def get_commands_config_mtu(delta, interface):
+ CONFIG_ARGS = {
+ 'mtu': 'mtu {mtu}',
+ 'sysmtu': 'system jumbomtu {sysmtu}',
+ }
+
+ commands = []
+ for param, value in delta.iteritems():
+ command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
+ if command and command != 'DNE':
+ commands.append(command)
+ command = None
+ mtu_check = delta.get('mtu', None)
+ if mtu_check:
+ commands.insert(0, 'interface {0}'.format(interface))
+ return commands
+
+
+def get_commands_remove_mtu(delta, interface):
+ CONFIG_ARGS = {
+ 'mtu': 'no mtu {mtu}',
+ 'sysmtu': 'no system jumbomtu {sysmtu}',
+ }
+ commands = []
+ for param, value in delta.iteritems():
+ command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
+ if command and command != 'DNE':
+ commands.append(command)
+ command = None
+ mtu_check = delta.get('mtu', None)
+ if mtu_check:
+ commands.insert(0, 'interface {0}'.format(interface))
+ return commands
+
+
+def get_interface_type(interface):
+ if interface.upper().startswith('ET'):
+ return 'ethernet'
+ elif interface.upper().startswith('VL'):
+ return 'svi'
+ elif interface.upper().startswith('LO'):
+ return 'loopback'
+ elif interface.upper().startswith('MG'):
+ return 'management'
+ elif interface.upper().startswith('MA'):
+ return 'management'
+ elif interface.upper().startswith('PO'):
+ return 'portchannel'
+ else:
+ return 'unknown'
+
+
+def is_default(interface, module):
+    """Return True if *interface* carries no config, False if it does,
+    or the string 'DNE' when the interface does not exist."""
+    command = 'show run interface {0}'.format(interface)
+
+    try:
+        body = execute_show_command(
+            command, module, command_type='cli_show_ascii')[0]
+        if body == 'DNE':
+            return 'DNE'
+        else:
+            raw_list = body.split('\n')
+            # a defaulted interface shows only its 'interface ...' line last
+            if raw_list[-1].startswith('interface'):
+                return True
+            else:
+                return False
+    except (KeyError):
+        return 'DNE'
+
+
+def get_interface_mode(interface, intf_type, module):
+    """Return 'layer2', 'layer3' or 'unknown' for the given interface."""
+    command = 'show interface {0}'.format(interface)
+    mode = 'unknown'
+    interface_table = {}
+    body = execute_show_command(command, module)
+
+    try:
+        interface_table = body[0]['TABLE_interface']['ROW_interface']
+    except (KeyError, AttributeError, IndexError):
+        return mode
+
+    if intf_type in ['ethernet', 'portchannel']:
+        # 'access'/'trunk' are layer2; 'routed' is layer3
+        mode = str(interface_table.get('eth_mode', 'layer3'))
+        if mode in ['access', 'trunk']:
+            mode = 'layer2'
+        elif mode == 'routed':
+            mode = 'layer3'
+    elif intf_type in ['loopback', 'svi']:
+        mode = 'layer3'
+    return mode
+
+
+def main():
+    """Module entry point: validate params, compute delta, apply MTU config."""
+    argument_spec = dict(
+        mtu=dict(type='str'),
+        interface=dict(type='str'),
+        sysmtu=dict(type='str'),
+        state=dict(choices=['absent', 'present'], default='present'),
+    )
+    module = get_network_module(argument_spec=argument_spec,
+                                required_together=[['mtu', 'interface']],
+                                supports_check_mode=True)
+
+    interface = module.params['interface']
+    mtu = module.params['mtu']
+    sysmtu = module.params['sysmtu']
+    state = module.params['state']
+
+    # sysmtu and interface/mtu are mutually exclusive usages
+    if sysmtu and (interface or mtu):
+        module.fail_json(msg='Proper usage-- either just use the sysmtu param '
+                             'or use interface AND mtu params')
+
+    if interface:
+        intf_type = get_interface_type(interface)
+        if intf_type != 'ethernet':
+            if is_default(interface, module) == 'DNE':
+                module.fail_json(msg='Invalid interface.  It does not exist '
+                                     'on the switch.')
+
+        existing = get_mtu(interface, module)
+    else:
+        existing = get_system_mtu(module)
+
+    if interface and mtu:
+        if intf_type == 'loopback':
+            module.fail_json(msg='Cannot set MTU for loopback interface.')
+        mode = get_interface_mode(interface, intf_type, module)
+        if mode == 'layer2':
+            if intf_type in ['ethernet', 'portchannel']:
+                # L2 ports may only use the system default or the sysmtu value
+                if mtu not in [existing['sysmtu'], '1500']:
+                    module.fail_json(msg='MTU on L2 interfaces can only be set'
+                                         ' to the system default (1500) or '
+                                         'existing sysmtu value which is '
+                                         ' {0}'.format(existing['sysmtu']))
+        elif mode == 'layer3':
+            if intf_type in ['ethernet', 'portchannel', 'svi']:
+                # L3 MTU must be an even number in [576, 9216]
+                if ((int(mtu) < 576 or int(mtu) > 9216) or
+                        ((int(mtu) % 2) != 0)):
+                    module.fail_json(msg='Invalid MTU for Layer 3 interface'
+                                         'needs to be an even number between'
+                                         '576 and 9216')
+    if sysmtu:
+        if ((int(sysmtu) < 576 or int(sysmtu) > 9216 or
+                ((int(sysmtu) % 2) != 0))):
+            module.fail_json(msg='Invalid MTU- needs to be an even '
+                                 'number between 576 and 9216')
+
+    args = dict(mtu=mtu, sysmtu=sysmtu)
+    proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
+    delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
+
+    changed = False
+    end_state = existing
+    commands = []
+
+    if state == 'present':
+        if delta:
+            command = get_commands_config_mtu(delta, interface)
+            commands.append(command)
+
+    elif state == 'absent':
+        # only remove values that are actually configured as proposed
+        common = set(proposed.iteritems()).intersection(existing.iteritems())
+        if common:
+            command = get_commands_remove_mtu(dict(common), interface)
+            commands.append(command)
+
+    cmds = flatten_list(commands)
+    if cmds:
+        if module.check_mode:
+            module.exit_json(changed=True, commands=cmds)
+        else:
+            changed = True
+            execute_config_command(cmds, module)
+            if interface:
+                end_state = get_mtu(interface, module)
+            else:
+                end_state = get_system_mtu(module)
+            if 'configure' in cmds:
+                # strip the 'configure' prefix injected by the cli fallback
+                cmds.pop(0)
+
+    results = {}
+    results['proposed'] = proposed
+    results['existing'] = existing
+    results['end_state'] = end_state
+    results['updates'] = cmds
+    results['changed'] = changed
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_ntp.py b/network/nxos/nxos_ntp.py
new file mode 100644
index 00000000000..4c6e406e6b9
--- /dev/null
+++ b/network/nxos/nxos_ntp.py
@@ -0,0 +1,635 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_ntp
+version_added: "2.2"
+short_description: Manages core NTP configuration.
+description:
+ - Manages core NTP configuration.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+options:
+ server:
+ description:
+ - Network address of NTP server.
+ required: false
+ default: null
+ peer:
+ description:
+ - Network address of NTP peer.
+ required: false
+ default: null
+ key_id:
+ description:
+ - Authentication key identifier to use with
+ given NTP server or peer.
+ required: false
+ default: null
+ prefer:
+ description:
+ - Makes given NTP server or peer the preferred
+ NTP server or peer for the device.
+ required: false
+ default: null
+ choices: ['enabled', 'disabled']
+ vrf_name:
+ description:
+ - Makes the device communicate with the given
+ NTP server or peer over a specific VRF.
+ required: false
+ default: null
+ source_addr:
+ description:
+ - Local source address from which NTP messages are sent.
+ required: false
+ default: null
+ source_int:
+ description:
+ - Local source interface from which NTP messages are sent.
+ Must be fully qualified interface name.
+ required: false
+ default: null
+ state:
+ description:
+ - Manage the state of the resource.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+# Set NTP Server with parameters
+- nxos_ntp:
+ server: 1.2.3.4
+ key_id: 32
+ prefer: enabled
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"address": "2.2.2.2", "key_id": "48",
+ "peer_type": "server", "prefer": "enabled",
+ "source": "3.3.3.3", "source_type": "source"}
+existing:
+ description:
+ - k/v pairs of existing ntp server/peer
+ type: dict
+ sample: {"address": "2.2.2.2", "key_id": "32",
+ "peer_type": "server", "prefer": "enabled",
+ "source": "ethernet2/1", "source_type": "source-interface"}
+end_state:
+ description: k/v pairs of ntp info after module execution
+ returned: always
+ type: dict
+ sample: {"address": "2.2.2.2", "key_id": "48",
+ "peer_type": "server", "prefer": "enabled",
+ "source": "3.3.3.3", "source_type": "source"}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["ntp server 2.2.2.2 prefer key 48",
+ "no ntp source-interface ethernet2/1", "ntp source 3.3.3.3"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh(command, response, module):
+ """Get response for when transport=cli. This is kind of a hack and mainly
+ needed because these modules were originally written for NX-API. And
+ not every command supports "| json" when using cli/ssh. As such, we assume
+ if | json returns an XML string, it is a valid command, but that the
+ resource doesn't exist yet. Instead, the output will be a raw string
+ when issuing commands containing 'show run'.
+ """
+ if 'xml' in response[0] or response[0] == '\n':
+ body = []
+ elif 'show run' in command:
+ body = response
+ else:
+ try:
+ body = [json.loads(response[0])]
+ except ValueError:
+ module.fail_json(msg='Command does not support JSON output',
+ command=command)
+ return body
+
+
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+ if module.params['transport'] == 'cli':
+ if 'show run' not in command:
+ command += ' | json'
+ cmds = [command]
+ response = execute_show(cmds, module)
+ body = get_cli_body_ssh(command, response, module)
+ elif module.params['transport'] == 'nxapi':
+ cmds = [command]
+ body = execute_show(cmds, module, command_type=command_type)
+
+ return body
+
+
+def flatten_list(command_lists):
+ flat_command_list = []
+ for command in command_lists:
+ if isinstance(command, list):
+ flat_command_list.extend(command)
+ else:
+ flat_command_list.append(command)
+ return flat_command_list
+
+
+def get_ntp_source(module):
+ source_type = None
+ source = None
+ command = 'show run | inc ntp.source'
+ output = execute_show_command(command, module, command_type='cli_show_ascii')
+
+ if output:
+ try:
+ if 'interface' in output[0]:
+ source_type = 'source-interface'
+ else:
+ source_type = 'source'
+ source = output[0].split()[2].lower()
+ except AttributeError:
+ source_type = None
+ source = None
+
+ return source_type, source
+
+
+def get_ntp_peer(module):
+ command = 'show run | inc ntp.(server|peer)'
+ ntp_peer_list = []
+ ntp = execute_show_command(
+ command, module, command_type='cli_show_ascii')[0]
+
+ ntp_regex = (
+        ".*ntp\s(server\s(?P<address>\S+)|peer\s(?P<peer_address>\S+))"
+        "\s*((?P<prefer>prefer)\s*)?(use-vrf\s(?P<vrf_name>\S+)\s*)?"
+        "(key\s(?P<key_id>\d+))?.*"
+ )
+
+ split_ntp = ntp.splitlines()
+ for peer_line in split_ntp:
+ ntp_peer = {}
+ try:
+ peer_address = None
+ vrf_name = None
+ prefer = None
+ key_id = None
+ match_ntp = re.match(ntp_regex, peer_line, re.DOTALL)
+ group_ntp = match_ntp.groupdict()
+
+ address = group_ntp["address"]
+ peer_address = group_ntp['peer_address']
+ prefer = group_ntp['prefer']
+ vrf_name = group_ntp['vrf_name']
+ key_id = group_ntp['key_id']
+
+ if prefer is not None:
+ prefer = 'enabled'
+ else:
+ prefer = 'disabled'
+
+ if address is not None:
+ peer_type = 'server'
+ elif peer_address is not None:
+ peer_type = 'peer'
+ address = peer_address
+
+ args = dict(peer_type=peer_type, address=address, prefer=prefer,
+ vrf_name=vrf_name, key_id=key_id)
+
+ ntp_peer = dict((k, v) for k, v in args.iteritems())
+ ntp_peer_list.append(ntp_peer)
+ except AttributeError:
+ ntp_peer_list = []
+
+ return ntp_peer_list
+
+
+def get_ntp_existing(address, peer_type, module):
+ peer_dict = {}
+ peer_server_list = []
+
+ peer_list = get_ntp_peer(module)
+ for peer in peer_list:
+ if peer['address'] == address:
+ peer_dict.update(peer)
+ else:
+ peer_server_list.append(peer)
+
+ source_type, source = get_ntp_source(module)
+
+ if (source_type is not None and source is not None):
+ peer_dict['source_type'] = source_type
+ peer_dict['source'] = source
+
+ return (peer_dict, peer_server_list)
+
+
+def set_ntp_server_peer(peer_type, address, prefer, key_id, vrf_name):
+ command_strings = []
+
+ if prefer:
+ command_strings.append(' prefer')
+ if key_id:
+ command_strings.append(' key {0}'.format(key_id))
+ if vrf_name:
+ command_strings.append(' use-vrf {0}'.format(vrf_name))
+
+ command_strings.insert(0, 'ntp {0} {1}'.format(peer_type, address))
+
+ command = ''.join(command_strings)
+
+ return command
+
+
+def config_ntp(delta, existing):
+ address = delta.get('address', existing.get('address'))
+ peer_type = delta.get('peer_type', existing.get('peer_type'))
+ vrf_name = delta.get('vrf_name', existing.get('vrf_name'))
+ key_id = delta.get('key_id', existing.get('key_id'))
+ prefer = delta.get('prefer', existing.get('prefer'))
+
+ source_type = delta.get('source_type')
+ source = delta.get('source')
+
+ if prefer:
+ if prefer == 'enabled':
+ prefer = True
+ elif prefer == 'disabled':
+ prefer = False
+
+ if source:
+ source_type = delta.get('source_type', existing.get('source_type'))
+
+ ntp_cmds = []
+ if peer_type:
+ ntp_cmds.append(set_ntp_server_peer(
+ peer_type, address, prefer, key_id, vrf_name))
+ if source:
+ existing_source_type = existing.get('source_type')
+ existing_source = existing.get('source')
+ if existing_source_type and source_type != existing_source_type:
+ ntp_cmds.append('no ntp {0} {1}'.format(existing_source_type, existing_source))
+ ntp_cmds.append('ntp {0} {1}'.format(source_type, source))
+
+ return ntp_cmds
+
+
+def main():
+ argument_spec = dict(
+ server=dict(type='str'),
+ peer=dict(type='str'),
+ key_id=dict(type='str'),
+ prefer=dict(type='str', choices=['enabled', 'disabled']),
+ vrf_name=dict(type='str'),
+ source_addr=dict(type='str'),
+ source_int=dict(type='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['server','peer'],
+ ['source_addr','source_int']],
+ supports_check_mode=True)
+
+ server = module.params['server'] or None
+ peer = module.params['peer'] or None
+ key_id = module.params['key_id']
+ prefer = module.params['prefer']
+ vrf_name = module.params['vrf_name']
+ source_addr = module.params['source_addr']
+ source_int = module.params['source_int']
+ state = module.params['state']
+ if source_int is not None:
+ source_int = source_int.lower()
+
+ if server:
+ peer_type = 'server'
+ address = server
+ elif peer:
+ peer_type = 'peer'
+ address = peer
+ else:
+ peer_type = None
+ address = None
+
+ source_type = None
+ source = None
+ if source_addr:
+ source_type = 'source'
+ source = source_addr
+ elif source_int:
+ source_type = 'source-interface'
+ source = source_int
+
+ if key_id or vrf_name or prefer:
+ if not server and not peer:
+ module.fail_json(
+ msg='Please supply the server or peer parameter')
+
+ args = dict(peer_type=peer_type, address=address, key_id=key_id,
+ prefer=prefer, vrf_name=vrf_name, source_type=source_type,
+ source=source)
+
+ proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
+
+ existing, peer_server_list = get_ntp_existing(address, peer_type, module)
+
+ end_state = existing
+ changed = False
+ commands = []
+
+ if state == 'present':
+ delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
+ if delta:
+ command = config_ntp(delta, existing)
+ if command:
+ commands.append(command)
+
+ elif state == 'absent':
+ if existing.get('peer_type') and existing.get('address'):
+ command = 'no ntp {0} {1}'.format(
+ existing['peer_type'], existing['address'])
+ if command:
+ commands.append([command])
+
+ existing_source_type = existing.get('source_type')
+ existing_source = existing.get('source')
+ proposed_source_type = proposed.get('source_type')
+ proposed_source = proposed.get('source')
+
+ if proposed_source_type:
+ if proposed_source_type == existing_source_type:
+ if proposed_source == existing_source:
+ command = 'no ntp {0} {1}'.format(
+ existing_source_type, existing_source)
+ if command:
+ commands.append([command])
+
+ cmds = flatten_list(commands)
+ if cmds:
+ if module.check_mode:
+ module.exit_json(changed=True, commands=cmds)
+ else:
+ changed = True
+ execute_config_command(cmds, module)
+ end_state = get_ntp_existing(address, peer_type, module)[0]
+ if 'configure' in cmds:
+ cmds.pop(0)
+
+ results = {}
+ results['proposed'] = proposed
+ results['existing'] = existing
+ results['updates'] = cmds
+ results['changed'] = changed
+ results['end_state'] = end_state
+ results['peer_server_list'] = peer_server_list
+
+ module.exit_json(**results)
+
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_ntp_auth.py b/network/nxos/nxos_ntp_auth.py
new file mode 100644
index 00000000000..25071d6b893
--- /dev/null
+++ b/network/nxos/nxos_ntp_auth.py
@@ -0,0 +1,572 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+
+module: nxos_ntp_auth
+version_added: "2.2"
+short_description: Manages NTP authentication.
+description:
+ - Manages NTP authentication.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+notes:
+ - If C(state=absent), the module will attempt to remove the given key configuration.
+ If a matching key configuration isn't found on the device, the module will fail.
+ - If C(state=absent) and C(authentication=on), authentication will be turned off.
+ - If C(state=absent) and C(authentication=off), authentication will be turned on.
+options:
+ key_id:
+ description:
+ - Authentication key identifier (numeric).
+ required: true
+ md5string:
+ description:
+ - MD5 String.
+ required: true
+ default: null
+ auth_type:
+ description:
+ - Whether the given md5string is in cleartext or
+ has been encrypted. If in cleartext, the device
+ will encrypt it before storing it.
+ required: false
+ default: text
+ choices: ['text', 'encrypt']
+ trusted_key:
+ description:
+ - Whether the given key is required to be supplied by a time source
+ for the device to synchronize to the time source.
+ required: false
+ default: false
+ choices: ['true', 'false']
+ authentication:
+ description:
+ - Turns NTP authentication on or off.
+ required: false
+ default: null
+ choices: ['on', 'off']
+ state:
+ description:
+ - Manage the state of the resource.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+# Basic NTP authentication configuration
+- nxos_ntp_auth:
+ key_id: 32
+ md5string: hello
+ auth_type: text
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"auth_type": "text", "authentication": "off",
+ "key_id": "32", "md5string": "helloWorld",
+ "trusted_key": "true"}
+existing:
+ description:
+ - k/v pairs of existing ntp authentication
+ type: dict
+ sample: {"authentication": "off", "trusted_key": "false"}
+end_state:
+ description: k/v pairs of ntp authentication after module execution
+ returned: always
+ type: dict
+ sample: {"authentication": "off", "key_id": "32",
+ "md5string": "kapqgWjwdg", "trusted_key": "true"}
+state:
+ description: state as sent in from the playbook
+ returned: always
+ type: string
+ sample: "present"
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["ntp authentication-key 32 md5 helloWorld 0", "ntp trusted-key 32"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh(command, response, module):
+ """Get response for when transport=cli. This is kind of a hack and mainly
+ needed because these modules were originally written for NX-API. And
+ not every command supports "| json" when using cli/ssh. As such, we assume
+ if | json returns an XML string, it is a valid command, but that the
+ resource doesn't exist yet. Instead, the output will be a raw string
+ when issuing commands containing 'show run'.
+ """
+ if 'xml' in response[0]:
+ body = []
+ elif 'show run' in command:
+ body = response
+ else:
+ try:
+ body = [json.loads(response[0])]
+ except ValueError:
+ module.fail_json(msg='Command does not support JSON output',
+ command=command)
+ return body
+
+
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+ if module.params['transport'] == 'cli':
+ if 'show run' not in command:
+ command += ' | json'
+ cmds = [command]
+ response = execute_show(cmds, module)
+ body = get_cli_body_ssh(command, response, module)
+ elif module.params['transport'] == 'nxapi':
+ cmds = [command]
+ body = execute_show(cmds, module, command_type=command_type)
+
+ return body
+
+
+def flatten_list(command_lists):
+ flat_command_list = []
+ for command in command_lists:
+ if isinstance(command, list):
+ flat_command_list.extend(command)
+ else:
+ flat_command_list.append(command)
+ return flat_command_list
+
+
+def get_ntp_auth(module):
+ command = 'show ntp authentication-status'
+
+ body = execute_show_command(command, module)[0]
+ ntp_auth_str = body['authentication']
+
+ if 'enabled' in ntp_auth_str:
+ ntp_auth = True
+ else:
+ ntp_auth = False
+
+ return ntp_auth
+
+
+def get_ntp_trusted_key(module):
+ trusted_key_list = []
+ command = 'show run | inc ntp.trusted-key'
+
+ trusted_key_str = execute_show_command(
+ command, module, command_type='cli_show_ascii')[0]
+ if trusted_key_str:
+ trusted_keys = trusted_key_str.splitlines()
+
+ else:
+ trusted_keys = []
+
+ for line in trusted_keys:
+ if line:
+ trusted_key_list.append(str(line.split()[2]))
+
+ return trusted_key_list
+
+
+def get_ntp_auth_key(key_id, module):
+ authentication_key = {}
+ command = 'show run | inc ntp.authentication-key.{0}'.format(key_id)
+    auth_regex = (".*ntp\sauthentication-key\s(?P<key_id>\d+)\s"
+                  "md5\s(?P<md5string>\S+).*")
+
+ body = execute_show_command(command, module, command_type='cli_show_ascii')
+
+ try:
+ match_authentication = re.match(auth_regex, body[0], re.DOTALL)
+ group_authentication = match_authentication.groupdict()
+ key_id = group_authentication["key_id"]
+ md5string = group_authentication['md5string']
+ authentication_key['key_id'] = key_id
+ authentication_key['md5string'] = md5string
+ except (AttributeError, TypeError):
+ authentication_key = {}
+
+ return authentication_key
+
+
+def get_ntp_auth_info(key_id, module):
+ auth_info = get_ntp_auth_key(key_id, module)
+ trusted_key_list = get_ntp_trusted_key(module)
+ auth_power = get_ntp_auth(module)
+
+ if key_id in trusted_key_list:
+ auth_info['trusted_key'] = 'true'
+ else:
+ auth_info['trusted_key'] = 'false'
+
+ if auth_power:
+ auth_info['authentication'] = 'on'
+ else:
+ auth_info['authentication'] = 'off'
+
+ return auth_info
+
+
+def auth_type_to_num(auth_type):
+ if auth_type == 'encrypt' :
+ return '7'
+ else:
+ return '0'
+
+
+def set_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
+ ntp_auth_cmds = []
+ auth_type_num = auth_type_to_num(auth_type)
+ ntp_auth_cmds.append(
+ 'ntp authentication-key {0} md5 {1} {2}'.format(
+ key_id, md5string, auth_type_num))
+
+ if trusted_key == 'true':
+ ntp_auth_cmds.append(
+ 'ntp trusted-key {0}'.format(key_id))
+ elif trusted_key == 'false':
+ ntp_auth_cmds.append(
+ 'no ntp trusted-key {0}'.format(key_id))
+
+ if authentication == 'on':
+ ntp_auth_cmds.append(
+ 'ntp authenticate')
+ elif authentication == 'off':
+ ntp_auth_cmds.append(
+ 'no ntp authenticate')
+
+ return ntp_auth_cmds
+
+
+def remove_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
+ auth_remove_cmds = []
+ auth_type_num = auth_type_to_num(auth_type)
+ auth_remove_cmds.append(
+ 'no ntp authentication-key {0} md5 {1} {2}'.format(
+ key_id, md5string, auth_type_num))
+
+ if authentication == 'on':
+ auth_remove_cmds.append(
+ 'no ntp authenticate')
+ elif authentication == 'off':
+ auth_remove_cmds.append(
+ 'ntp authenticate')
+
+ return auth_remove_cmds
+
+
+def main():
+ argument_spec = dict(
+ key_id=dict(required=True, type='str'),
+ md5string=dict(required=True, type='str'),
+ auth_type=dict(choices=['text', 'encrypt'], default='text'),
+ trusted_key=dict(choices=['true', 'false'], default='false'),
+ authentication=dict(choices=['on', 'off']),
+ state=dict(choices=['absent', 'present'], default='present'),
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ key_id = module.params['key_id']
+ md5string = module.params['md5string']
+ auth_type = module.params['auth_type']
+ trusted_key = module.params['trusted_key']
+ authentication = module.params['authentication']
+ state = module.params['state']
+
+ args = dict(key_id=key_id, md5string=md5string,
+ auth_type=auth_type, trusted_key=trusted_key,
+ authentication=authentication)
+
+ changed = False
+ proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
+
+ existing = get_ntp_auth_info(key_id, module)
+ end_state = existing
+
+ delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
+
+ commands = []
+ if state == 'present':
+ if delta:
+ command = set_ntp_auth_key(
+ key_id, md5string, auth_type, trusted_key, delta.get('authentication'))
+ if command:
+ commands.append(command)
+ elif state == 'absent':
+ if existing:
+ auth_toggle = None
+ if authentication == existing.get('authentication'):
+ auth_toggle = authentication
+ command = remove_ntp_auth_key(
+ key_id, md5string, auth_type, trusted_key, auth_toggle)
+ if command:
+ commands.append(command)
+
+ cmds = flatten_list(commands)
+ if cmds:
+ if module.check_mode:
+ module.exit_json(changed=True, commands=cmds)
+ else:
+ try:
+ execute_config_command(cmds, module)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg=str(clie) + ": " + cmds)
+ end_state = get_ntp_auth_info(key_id, module)
+ delta = dict(set(end_state.iteritems()).difference(existing.iteritems()))
+ if delta or (len(existing) != len(end_state)):
+ changed = True
+ if 'configure' in cmds:
+ cmds.pop(0)
+
+ results = {}
+ results['proposed'] = proposed
+ results['existing'] = existing
+ results['updates'] = cmds
+ results['changed'] = changed
+ results['end_state'] = end_state
+
+ module.exit_json(**results)
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_ntp_options.py b/network/nxos/nxos_ntp_options.py
new file mode 100644
index 00000000000..010e67c8866
--- /dev/null
+++ b/network/nxos/nxos_ntp_options.py
@@ -0,0 +1,519 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+
+module: nxos_ntp_options
+version_added: "2.2"
+short_description: Manages NTP options.
+description:
+ - Manages NTP options, e.g. authoritative server and logging.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+notes:
+ - At least one of C(master) or C(logging) params must be supplied.
+ - When C(state=absent), boolean parameters are flipped,
+ e.g. C(master=true) will disable the authoritative server.
+ - When C(state=absent) and C(master=true), the stratum will be removed as well.
+ - When C(state=absent) and C(master=false), the stratum will be configured
+ to its default value, 8.
+options:
+ master:
+ description:
+ - Sets whether the device is an authoritative NTP server.
+ required: false
+ default: null
+ choices: ['true','false']
+ stratum:
+ description:
+ - If C(master=true), an optional stratum can be supplied (1-15).
+ The device default is 8.
+ required: false
+ default: null
+ logging:
+ description:
+ - Sets whether NTP logging is enabled on the device.
+ required: false
+ default: null
+ choices: ['true','false']
+ state:
+ description:
+ - Manage the state of the resource.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+EXAMPLES = '''
+# Basic NTP options configuration
+- nxos_ntp_options:
+ master: true
+ stratum: 12
+ logging: false
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"logging": false, "master": true, "stratum": "11"}
+existing:
+    description: k/v pairs of existing ntp options
+    returned: always
+    type: dict
+    sample: {"logging": true, "master": true, "stratum": "8"}
+end_state:
+ description: k/v pairs of ntp options after module execution
+ returned: always
+ type: dict
+ sample: {"logging": false, "master": true, "stratum": "11"}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["no ntp logging", "ntp master 11"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+    """Coerce *val* to a list: lists/tuples are copied, None becomes an
+    empty list, and any other single value is wrapped in a list."""
+    if isinstance(val, (list, tuple)):
+        return list(val)
+    elif val is not None:
+        return [val]
+    else:
+        return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+    """NetworkConfig subclass shared by the migrated nxos modules.
+
+    Adds helpers for expanding/locating configuration sections and an
+    add() that creates missing parent hierarchy on the fly.
+    """
+
+    def expand_section(self, configobj, S=None):
+        # Depth-first collection of configobj plus all of its
+        # descendants into S (shared accumulator across recursion).
+        if S is None:
+            S = list()
+        S.append(configobj)
+        for child in configobj.children:
+            if child in S:
+                continue
+            self.expand_section(child, S)
+        return S
+
+    def get_object(self, path):
+        # Return the ConfigLine whose text matches the last path element
+        # and whose ancestry matches the rest; implicitly None if absent.
+        for item in self.items:
+            if item.text == path[-1]:
+                parents = [p.text for p in item.parents]
+                if parents == path[:-1]:
+                    return item
+
+    def to_block(self, section):
+        # Render a list of ConfigLine objects back into raw config text.
+        return '\n'.join([item.raw for item in section])
+
+    def get_section(self, path):
+        # Return the section at *path* as a text block, or an empty list
+        # when the path does not exist.
+        # NOTE(review): the str-vs-list return types are inconsistent;
+        # kept as-is since callers depend on this behavior.
+        try:
+            section = self.get_section_objects(path)
+            return self.to_block(section)
+        except ValueError:
+            return list()
+
+    def get_section_objects(self, path):
+        # Like get_object() but returns the object plus all children;
+        # raises ValueError when the path is not in the config.
+        if not isinstance(path, list):
+            path = [path]
+        obj = self.get_object(path)
+        if not obj:
+            raise ValueError('path does not exist in config')
+        return self.expand_section(obj)
+
+
+    def add(self, lines, parents=None):
+        """Add one or more lines of configuration, creating any missing
+        parent sections along the way.
+        """
+
+        ancestors = list()
+        offset = 0
+        obj = None
+
+        ## global config command
+        if not parents:
+            for line in to_list(lines):
+                item = ConfigLine(line)
+                item.raw = line
+                if item not in self.items:
+                    self.items.append(item)
+
+        else:
+            for index, p in enumerate(parents):
+                try:
+                    i = index + 1
+                    obj = self.get_section_objects(parents[:i])[0]
+                    ancestors.append(obj)
+
+                except ValueError:
+                    # add parent to config
+                    offset = index * self.indent
+                    obj = ConfigLine(p)
+                    obj.raw = p.rjust(len(p) + offset)
+                    if ancestors:
+                        obj.parents = list(ancestors)
+                        ancestors[-1].children.append(obj)
+                    self.items.append(obj)
+                    ancestors.append(obj)
+
+            # add child objects
+            for line in to_list(lines):
+                # check if child already exists
+                for child in ancestors[-1].children:
+                    if child.text == line:
+                        break
+                else:
+                    # indent the child one level below its deepest parent
+                    offset = len(parents) * self.indent
+                    item = ConfigLine(line)
+                    item.raw = line.rjust(len(line) + offset)
+                    item.parents = ancestors
+                    ancestors[-1].children.append(item)
+                    self.items.append(item)
+
+
+def get_network_module(**kwargs):
+    """Build the module object via get_module() (older Ansible) or
+    NetworkModule (newer), whichever import succeeded above."""
+    try:
+        return get_module(**kwargs)
+    except NameError:
+        return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+    """Return the device running-config as a CustomNetworkConfig.
+
+    Prefers an explicitly supplied 'config' module param; otherwise
+    fetches from the device, handling both module API generations.
+    """
+    # NOTE(review): the include_defaults argument is ignored; the
+    # 'include_defaults' module param is read instead — confirm intent.
+    config = module.params['config']
+    if not config:
+        try:
+            config = module.get_config()
+        except AttributeError:
+            defaults = module.params['include_defaults']
+            config = module.config.get_config(include_defaults=defaults)
+    return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+    """Diff *candidate* against the running config and push the delta.
+
+    Returns a dict with 'changed' and, when commands were generated,
+    'updates'.  Optionally copies run->start when the 'save' param is set.
+    """
+    config = get_config(module)
+
+    commands = candidate.difference(config)
+    commands = [str(c).strip() for c in commands]
+
+    save_config = module.params['save']
+
+    result = dict(changed=False)
+
+    if commands:
+        if not module.check_mode:
+            # Support both module API generations for sending config.
+            try:
+                module.configure(commands)
+            except AttributeError:
+                module.config(commands)
+
+            if save_config:
+                try:
+                    module.config.save_config()
+                except AttributeError:
+                    module.execute(['copy running-config startup-config'])
+
+        result['changed'] = True
+        result['updates'] = commands
+
+    return result
+# END OF COMMON CODE
+
+
+def execute_config_command(commands, module):
+    """Send config commands to the device, supporting both module APIs.
+
+    Newer connections expose module.configure(); older CLI connections
+    raise AttributeError and are driven through module.cli instead.
+    """
+    try:
+        module.configure(commands)
+    except ShellError:
+        clie = get_exception()
+        module.fail_json(msg='Error sending CLI commands',
+                         error=str(clie), commands=commands)
+    except AttributeError:
+        try:
+            # Fall back to the raw CLI API; 'configure' enters config mode
+            # (note: this mutates the caller's commands list in place).
+            commands.insert(0, 'configure')
+            module.cli.add_commands(commands, output='config')
+            module.cli.run_commands()
+        except ShellError:
+            clie = get_exception()
+            module.fail_json(msg='Error sending CLI commands',
+                             error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh(command, response, module):
+    """Get response for when transport=cli. This is kind of a hack and mainly
+    needed because these modules were originally written for NX-API. And
+    not every command supports "| json" when using cli/ssh. As such, we assume
+    if | json returns an XML string, it is a valid command, but that the
+    resource doesn't exist yet. Instead, the output will be a raw string
+    when issuing commands containing 'show run'.
+    """
+    # An XML error page or a bare newline means the resource is absent.
+    if 'xml' in response[0] or response[0] == '\n':
+        body = []
+    elif 'show run' in command:
+        # 'show run' output comes back as raw text, not JSON.
+        body = response
+    else:
+        try:
+            body = [json.loads(response[0])]
+        except ValueError:
+            module.fail_json(msg='Command does not support JSON output',
+                             command=command)
+    return body
+
+
+def execute_show(cmds, module, command_type=None):
+    """Run show commands via module.execute(), falling back to the
+    module.cli API (older connections) on AttributeError."""
+    # Maps nxapi-style command types to cli output formats.
+    command_type_map = {
+        'cli_show': 'json',
+        'cli_show_ascii': 'text'
+    }
+
+    try:
+        if command_type:
+            response = module.execute(cmds, command_type=command_type)
+        else:
+            response = module.execute(cmds)
+    except ShellError:
+        clie = get_exception()
+        module.fail_json(msg='Error sending {0}'.format(cmds),
+                         error=str(clie))
+    except AttributeError:
+        try:
+            if command_type:
+                command_type = command_type_map.get(command_type)
+                module.cli.add_commands(cmds, output=command_type)
+                response = module.cli.run_commands()
+            else:
+                module.cli.add_commands(cmds, raw=True)
+                response = module.cli.run_commands()
+        except ShellError:
+            clie = get_exception()
+            module.fail_json(msg='Error sending {0}'.format(cmds),
+                             error=str(clie))
+    return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+    """Run a show command, normalizing output across cli/nxapi transports."""
+    if module.params['transport'] == 'cli':
+        # Over ssh/cli, request JSON explicitly (except for 'show run').
+        if 'show run' not in command:
+            command += ' | json'
+        cmds = [command]
+        response = execute_show(cmds, module)
+        body = get_cli_body_ssh(command, response, module)
+    elif module.params['transport'] == 'nxapi':
+        cmds = [command]
+        body = execute_show(cmds, module, command_type=command_type)
+
+    # NOTE(review): 'body' is unbound for any other transport value;
+    # relies on the shared spec restricting transport to cli/nxapi.
+    return body
+
+
+def flatten_list(command_lists):
+    """Flatten one level of nesting: [[a, b], c] -> [a, b, c]."""
+    flat_command_list = []
+    for command in command_lists:
+        if isinstance(command, list):
+            flat_command_list.extend(command)
+        else:
+            flat_command_list.append(command)
+    return flat_command_list
+
+
+def apply_key_map(key_map, table):
+    """Rename keys of *table* via *key_map*, dropping unmapped keys.
+
+    Truthy values are coerced to str; falsy values are kept unchanged.
+    """
+    new_dict = {}
+    for key, value in table.items():
+        new_key = key_map.get(key)
+        if new_key:
+            value = table.get(key)
+            if value:
+                new_dict[new_key] = str(value)
+            else:
+                new_dict[new_key] = value
+    return new_dict
+
+
+def get_ntp_master(module):
+    """Return (master, stratum) parsed from 'show run | inc ntp.master'.
+
+    master is True when a matching, non-empty line exists; stratum is
+    the third token of that line ('ntp master <stratum>'), else None.
+    """
+    command = 'show run | inc ntp.master'
+    master_string = execute_show_command(command, module, command_type='cli_show_ascii')
+
+    if master_string:
+        if master_string[0]:
+            master = True
+        else:
+            master = False
+    else:
+        master = False
+
+    if master is True:
+        # NOTE(review): assumes a stratum token is always present;
+        # a bare 'ntp master' line would raise IndexError — confirm
+        # device output always includes the stratum.
+        stratum = str(master_string[0].split()[2])
+    else:
+        stratum = None
+
+    return master, stratum
+
+
+def get_ntp_log(module):
+    """Return True when NTP logging is enabled, per 'show ntp logging'."""
+    command = 'show ntp logging'
+    body = execute_show_command(command, module)[0]
+
+    logging_string = body['loggingstatus']
+    if 'enabled' in logging_string:
+        ntp_log = True
+    else:
+        ntp_log = False
+
+    return ntp_log
+
+
+def get_ntp_options(module):
+    """Collect the device's current NTP options (logging/master/stratum)."""
+    existing = {}
+    existing['logging'] = get_ntp_log(module)
+    existing['master'], existing['stratum'] = get_ntp_master(module)
+
+    return existing
+
+
+def config_ntp_options(delta, flip=False):
+    """Build the CLI commands for the given option delta.
+
+    With flip=True (used for state=absent) the boolean options are
+    inverted so the options intersecting existing config get undone.
+    """
+    master = delta.get('master')
+    stratum = delta.get('stratum')
+    log = delta.get('logging')
+    ntp_cmds = []
+
+    if flip:
+        # NOTE(review): 'not None' evaluates to True, so a key absent
+        # from *delta* flips to True here and emits an enable command —
+        # confirm callers only pass keys present in the intersection.
+        log = not log
+        master = not master
+
+    if log is not None:
+        if log is True:
+            ntp_cmds.append('ntp logging')
+        elif log is False:
+            ntp_cmds.append('no ntp logging')
+    if master is not None:
+        if master is True:
+            if not stratum:
+                # Empty stratum yields 'ntp master ' (device default 8).
+                stratum = ''
+            ntp_cmds.append('ntp master {0}'.format(stratum))
+        elif master is False:
+            ntp_cmds.append('no ntp master')
+
+    return ntp_cmds
+
+
+def main():
+    """Module entry point: diff proposed vs. existing NTP options and
+    push the required 'ntp master'/'ntp logging' commands."""
+    argument_spec = dict(
+        master=dict(required=False, type='bool'),
+        stratum=dict(type='str'),
+        logging=dict(required=False, type='bool'),
+        state=dict(choices=['absent', 'present'], default='present'),
+    )
+    module = get_network_module(argument_spec=argument_spec,
+                                required_one_of=[['master', 'logging']],
+                                supports_check_mode=True)
+
+    master = module.params['master']
+    stratum = module.params['stratum']
+    logging = module.params['logging']
+    state = module.params['state']
+
+    if stratum:
+        # A stratum only makes sense for an authoritative (master) server.
+        if master is None:
+            module.fail_json(msg='The master param must be supplied when '
+                                 'stratum is supplied')
+        try:
+            stratum_int = int(stratum)
+            if stratum_int < 1 or stratum_int > 15:
+                raise ValueError
+        except ValueError:
+            module.fail_json(msg='Stratum must be an integer between 1 and 15')
+
+    existing = get_ntp_options(module)
+    end_state = existing
+
+    args = dict(master=master, stratum=stratum, logging=logging)
+
+    changed = False
+    # Python 2 dict API (iteritems); this module predates Python 3 support.
+    proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
+
+    if master is False:
+        # Disabling master implies removing any stratum from the proposal.
+        proposed['stratum'] = None
+        stratum = None
+
+    # Key/value pairs in proposed that differ from the device state.
+    delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
+    delta_stratum = delta.get('stratum')
+
+    if delta_stratum:
+        # A stratum change requires (re)issuing 'ntp master <stratum>'.
+        delta['master'] = True
+
+    commands = []
+    if state == 'present':
+        if delta:
+            command = config_ntp_options(delta)
+            if command:
+                commands.append(command)
+    elif state == 'absent':
+        if existing:
+            # Undo only the options that are both proposed and configured.
+            isection = dict(set(proposed.iteritems()).intersection(
+                existing.iteritems()))
+            command = config_ntp_options(isection, flip=True)
+            if command:
+                commands.append(command)
+
+    cmds = flatten_list(commands)
+    if cmds:
+        if module.check_mode:
+            # Check mode: report the would-be commands without applying.
+            module.exit_json(changed=True, commands=cmds)
+        else:
+            changed = True
+            execute_config_command(cmds, module)
+            # Re-read the device so end_state reflects the applied config.
+            end_state = get_ntp_options(module)
+            if 'configure' in cmds:
+                cmds.pop(0)
+
+    results = {}
+    results['proposed'] = proposed
+    results['existing'] = existing
+    results['updates'] = cmds
+    results['changed'] = changed
+    results['end_state'] = end_state
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/network/nxos/nxos_nxapi.py b/network/nxos/nxos_nxapi.py
index 88b0b9150d4..5317869f5aa 100644
--- a/network/nxos/nxos_nxapi.py
+++ b/network/nxos/nxos_nxapi.py
@@ -17,223 +17,255 @@
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
-module: nxos_nxapi
+module: nxos_nxapi
version_added: "2.1"
-author: "Chris Houseknecht (@chouseknecht)"
+author: "Peter Sprygada (@privateip)"
short_description: Manage NXAPI configuration on an NXOS device.
description:
- - Use to enable or disable NXAPI access, set the port and state
- of http and https servers, and enable or disable the sandbox.
- - When enabling NXAPI access the default is to enable HTTP on port
- 80, enable HTTPS on port 443, and enable the web based UI sandbox.
- Use the options below to override the default configuration.
+ - Configures the NXAPI feature on devices running Cisco NXOS. The
+ NXAPI feature is absent from the configuration by default. Since
+ this module manages the NXAPI feature it only supports the use
+ of the C(Cli) transport.
extends_documentation_fragment: nxos
options:
- state:
- description:
- - Set to started or stopped. A state of started will
- enable NXAPI access, and a state of stopped will
- disable or shutdown all NXAPI access.
- choices:
- - started
- - stopped
- required: false
- default: started
- http_port:
- description:
- - Port on which the HTTP server will listen.
- required: false
- default: 80
- https_port:
- description:
- - Port on which the HTTPS server will listen.
- required: false
- default: 443
- http:
- description:
- - Enable/disable HTTP server.
- required: false
- default: true
- choices:
- - true
- - false
- aliases:
- - enable_http
- https:
- description:
- - Enable/disable HTTPS server.
- required: false
- choices:
- - true
- - false
- default: true
- aliases:
- - enable_https
- sandbox:
- description:
- - Enable/disable NXAPI web based UI for entering commands.
- required: false
- default: true
- choices:
- - true
- - false
- aliases:
- - enable_sandbox
+ http_port:
+ description:
+ - Configure the port with which the HTTP server will listen on
+ for requests. By default, NXAPI will bind the HTTP service
+ to the standard HTTP port 80. This argument accepts valid
+ port values in the range of 1 to 65535.
+ required: false
+ default: 80
+ http:
+ description:
+ - Controls the operating state of the HTTP protocol as one of the
+ underlying transports for NXAPI. By default, NXAPI will enable
+ the HTTP transport when the feature is first configured. To
+ disable the use of the HTTP transport, set the value of this
+ argument to False.
+ required: false
+ default: yes
+ choices: ['yes', 'no']
+ aliases: ['enable_http']
+ https_port:
+ description:
+ - Configure the port with which the HTTPS server will listen on
+ for requests. By default, NXAPI will bind the HTTPS service
+ to the standard HTTPS port 443. This argument accepts valid
+ port values in the range of 1 to 65535.
+ required: false
+ default: 443
+ https:
+ description:
+ - Controls the operating state of the HTTPS protocol as one of the
+ underlying transports for NXAPI. By default, NXAPI will disable
+ the HTTPS transport when the feature is first configured. To
+ enable the use of the HTTPS transport, set the value of this
+ argument to True.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+ aliases: ['enable_https']
+ sandbox:
+ description:
+ - The NXAPI feature provides a web base UI for developers for
+ entering commands. This feature is initially disabled when
+ the NXAPI feature is configured for the first time. When the
+ C(sandbox) argument is set to True, the developer sandbox URL
+ will accept requests and when the value is set to False, the
+ sandbox URL is unavailable.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+ aliases: ['enable_sandbox']
+ config:
+ description:
+ - The C(config) argument provides an optional argument to
+ specify the device running-config to used as the basis for
+ configuring the remote system. The C(config) argument accepts
+ a string value that represents the device configuration.
+ required: false
+ default: null
+ version_added: "2.2"
+ state:
+ description:
+ - The C(state) argument controls whether or not the NXAPI
+ feature is configured on the remote device. When the value
+ is C(present) the NXAPI feature configuration is present in
+ the device running-config. When the values is C(absent) the
+ feature configuration is removed from the running-config.
+ choices: ['present', 'absent']
+ required: false
+ default: present
"""
EXAMPLES = """
- - name: Enable NXAPI access with default configuration
- nxos_nxapi:
- provider: {{ provider }}
-
- - name: Enable NXAPI with no HTTP, HTTPS at port 9443 and sandbox disabled
- nxos_nxapi:
- enable_http: false
- https_port: 9443
- enable_sandbox: no
- provider: {{ provider }}
-
- - name: shutdown NXAPI access
- nxos_nxapi:
- state: stopped
- provider: {{ provider }}
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: admin
+ password: admin
+
+- name: Enable NXAPI access with default configuration
+ nxos_nxapi:
+ provider: "{{ cli }}"
+
+- name: Enable NXAPI with no HTTP, HTTPS at port 9443 and sandbox disabled
+ nxos_nxapi:
+ enable_http: false
+ https_port: 9443
+ https: yes
+ enable_sandbox: no
+ provider: "{{ cli }}"
+
+- name: remove NXAPI configuration
+ nxos_nxapi:
+ state: absent
+ provider: "{{ cli }}"
"""
RETURN = """
-changed:
- description:
- - Indicates if commands were sent to the device.
- returned: always
- type: boolean
- sample: false
-
-commands:
- description:
- - Set of commands to be executed on remote device. If run in check mode,
- commands will not be executed.
- returned: always
- type: list
- sample: [
- 'nxapi feature',
- 'nxapi http port 8080'
- ]
-
-_config:
- description:
- - Configuration found on the device prior ro any commands being executed.
- returned: always
- type: object
- sample: {...}
+updates:
+ description:
+ - Returns the list of commands that need to be pushed into the remote
+ device to satisfy the arguments
+ returned: always
+ type: list
+ sample: ['no feature nxapi']
"""
+import re
+import time
+
+from ansible.module_utils.netcfg import NetworkConfig, dumps
+from ansible.module_utils.nxos import NetworkModule, NetworkError
+from ansible.module_utils.basic import get_exception
+
+PRIVATE_KEYS_RE = re.compile('__.+__')
+
+def invoke(name, *args, **kwargs):
+ func = globals().get(name)
+ if func:
+ return func(*args, **kwargs)
+
+def get_instance(module):
+ instance = dict(state='absent')
+ try:
+ resp = module.cli('show nxapi', 'json')
+ except NetworkError:
+ return instance
+
+ instance['state'] = 'present'
+
+ instance['http'] = 'http_port' in resp[0]
+ instance['http_port'] = resp[0].get('http_port') or 80
+ instance['https'] = 'https_port' in resp[0]
+ instance['https_port'] = resp[0].get('https_port') or 443
-def http_commands(protocol, port, enable, config):
- port_config = config.get('{0}_port'.format(protocol), None)
- changed = False
- commands = []
- if port_config is None and enable:
- # enable
- changed = True
- commands.append('nxapi {0} port {1}'.format(protocol, port))
- elif port_config is not None:
- if not enable:
- # disable
- commands.append('no nxapi {0}'.format(protocol))
- changed = True
- elif port_config != port:
- # update port
- commands.append('nxapi {0} port {1}'.format(protocol, port))
- changed = True
- return commands, changed
-
-
-def execute_commands(module, commands):
- if not module.params.get('check_mode'):
- module.configure(commands)
-
-
-def get_nxapi_state(module):
- features = module.execute(['show feature | grep nxapi'])[0]
- if re.search('disabled', features) is None:
- return 'started'
- return 'stopped'
-
-
-def config_server(module):
-
- nxapi_state = get_nxapi_state(module)
-
- config = dict()
- if nxapi_state == 'started':
- config = module.from_json(module.execute(['show nxapi | json'])[0])
-
- state = module.params.get('state')
- result = dict(changed=False, _config=config, commands=[])
- commands = []
-
- if config.get('nxapi_status', 'Disabled') == 'Disabled':
- if state == 'started':
- # enable nxapi and get the new default config
- commands.append('feature nxapi')
- result['_config'] = dict()
- result['changed'] = True
- if module.params.get('check_mode'):
- # make an assumption about default state
- config['http_port'] = 80
- config['sandbox_status'] = 'Disabled'
- else:
- # get the default config
- execute_commands(module, commands)
- config = module.from_json(module.execute(['show nxapi | json'])[0])
- else:
- # nxapi already disabled
- return result
- elif config.get('nxapi_status', 'Disabled') == 'Enabled' and state == 'stopped':
- # disable nxapi and exit
+ instance['sandbox'] = resp[0]['sandbox_status']
+
+ return instance
+
+def present(module, instance, commands):
+ commands.append('feature nxapi')
+ setters = set()
+ for key, value in module.argument_spec.iteritems():
+ setter = value.get('setter') or 'set_%s' % key
+ if setter not in setters:
+ setters.add(setter)
+ if module.params[key] is not None:
+ invoke(setter, module, instance, commands)
+
+def absent(module, instance, commands):
+ if instance['state'] != 'absent':
commands.append('no feature nxapi')
+
+def set_http(module, instance, commands):
+ port = module.params['http_port']
+ if not 0 <= port <= 65535:
+ module.fail_json(msg='http_port must be between 1 and 65535')
+ elif module.params['http'] is True:
+ commands.append('nxapi http port %s' % port)
+ elif module.params['http'] is False:
+ commands.append('no nxapi http')
+
+def set_https(module, instance, commands):
+ port = module.params['https_port']
+ if not 0 <= port <= 65535:
+ module.fail_json(msg='https_port must be between 1 and 65535')
+ elif module.params['https'] is True:
+ commands.append('nxapi https port %s' % port)
+ elif module.params['https'] is False:
+ commands.append('no nxapi https')
+
+def set_sandbox(module, instance, commands):
+ if module.params['sandbox'] is True:
+ commands.append('nxapi sandbox')
+ elif module.params['sandbox'] is False:
+ commands.append('no nxapi sandbox')
+
+def get_config(module):
+ contents = module.params['config']
+ if not contents:
+ try:
+ contents = module.cli(['show running-config nxapi all'])[0]
+ except NetworkError:
+ contents = None
+ config = NetworkConfig(indent=2)
+ if contents:
+ config.load(contents)
+ return config
+
+def load_checkpoint(module, result):
+ try:
+ checkpoint = result['__checkpoint__']
+ module.cli(['rollback running-config checkpoint %s' % checkpoint,
+ 'no checkpoint %s' % checkpoint], output='text')
+ except KeyError:
+ module.fail_json(msg='unable to rollback, checkpoint not found')
+ except NetworkError:
+ exc = get_exception()
+ msg = 'unable to rollback configuration'
+ module.fail_json(msg=msg, checkpoint=checkpoint, **exc.kwargs)
+
+def load_config(module, commands, result):
+ # create a config checkpoint
+ checkpoint = 'ansible_%s' % int(time.time())
+ module.cli(['checkpoint %s' % checkpoint], output='text')
+ result['__checkpoint__'] = checkpoint
+
+ # load the config into the device
+ module.config.load_config(commands)
+
+ # load was successfully, remove the config checkpoint
+ module.cli(['no checkpoint %s' % checkpoint])
+
+def load(module, commands, result):
+ candidate = NetworkConfig(indent=2, contents='\n'.join(commands))
+ config = get_config(module)
+ configobjs = candidate.difference(config)
+
+ if configobjs:
+ commands = dumps(configobjs, 'commands').split('\n')
+ result['updates'] = commands
+ if not module.check_mode:
+ load_config(module, commands, result)
result['changed'] = True
- result['commands'] = commands
- execute_commands(module, commands)
- return result
-
- # configure http and https
- for protocol in ['http', 'https']:
- cmds, chg = http_commands(protocol, module.params['{0}_port'.format(protocol)],
- module.params[protocol], config)
- if chg:
- commands += cmds
- result['changed'] = True
-
- # configure sandbox
- config_sandbox = config.get('sandbox_status', None)
- enable_sandbox = module.params.get('sandbox')
-
- if config_sandbox is None:
- # there is no prior state, so we must set one
- result['changed'] = True
- if enable_sandbox:
- commands.append('nxapi sandbox')
- else:
- commands.append('no nxapi sandbox')
- else:
- # there is a prior state, so be idempotent
- if config_sandbox == 'Enabled' and not enable_sandbox:
- # turn off sandbox
- commands.append('no nxapi sandbox')
- result['changed'] = True
- elif config_sandbox == 'Disabled' and enable_sandbox:
- # turn on sandbox
- commands.append('nxapi sandbox')
- result['changed'] = True
-
- if len(commands) > 0:
- # something requires change
- result['commands'] = commands
- execute_commands(module, commands)
-
- return result
+
+def clean_result(result):
+ # strip out any keys that have two leading and two trailing
+ # underscore characters
+ for key in result.keys():
+ if PRIVATE_KEYS_RE.match(key):
+ del result[key]
def main():
@@ -241,28 +273,58 @@ def main():
"""
argument_spec = dict(
- state=dict(default='started', choices=['started', 'stopped']),
- http_port=dict(default=80, type='int'),
- https_port=dict(default=443, type='int'),
- http=dict(aliases=['enable_http'], default=True, type='bool'),
- https=dict(aliases=['enable_https'], default=True, type='bool'),
- sandbox=dict(aliases=['enable_sandbox'], default=True, type='bool'),
-
- # Only allow configuration of NXAPI using cli transpsort
- transport=dict(required=True, choices=['cli'])
+ http=dict(aliases=['enable_http'], default=True, type='bool', setter='set_http'),
+ http_port=dict(default=80, type='int', setter='set_http'),
+
+ https=dict(aliases=['enable_https'], default=False, type='bool', setter='set_https'),
+ https_port=dict(default=443, type='int', setter='set_https'),
+
+ sandbox=dict(aliases=['enable_sandbox'], default=False, type='bool'),
+
+ # Only allow configuration of NXAPI using cli transport
+ transport=dict(required=True, choices=['cli']),
+
+ config=dict(),
+
+ # Support for started and stopped is for backwards capability only and
+ # will be removed in a future version
+ state=dict(default='present', choices=['started', 'stopped', 'present', 'absent'])
)
- module = get_module(argument_spec=argument_spec,
- supports_check_mode=True)
+ module = NetworkModule(argument_spec=argument_spec,
+ connect_on_load=False,
+ supports_check_mode=True)
+
+ state = module.params['state']
+
+ warnings = list()
+
+ result = dict(changed=False, warnings=warnings)
- result = config_server(module)
+ if state == 'started':
+ state = 'present'
+        warnings.append('state=started is deprecated and will be removed in '
+                        'a future release. Please use state=present instead')
+ elif state == 'stopped':
+ state = 'absent'
+        warnings.append('state=stopped is deprecated and will be removed in '
+                        'a future release. Please use state=absent instead')
- return module.exit_json(**result)
+ commands = list()
+ instance = get_instance(module)
+
+ invoke(state, module, instance, commands)
+
+ try:
+ load(module, commands, result)
+ except (ValueError, NetworkError):
+ load_checkpoint(module, result)
+ exc = get_exception()
+ module.fail_json(msg=str(exc), **exc.kwargs)
+
+ clean_result(result)
+ module.exit_json(**result)
-from ansible.module_utils.basic import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.nxos import *
if __name__ == '__main__':
main()
-
diff --git a/network/nxos/nxos_ospf.py b/network/nxos/nxos_ospf.py
new file mode 100644
index 00000000000..024b5f0ba24
--- /dev/null
+++ b/network/nxos/nxos_ospf.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_ospf
+version_added: "2.2"
+short_description: Manages configuration of an ospf instance.
+description:
+ - Manages configuration of an ospf instance.
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+options:
+ ospf:
+ description:
+ - Name of the ospf instance.
+ required: true
+ state:
+ description:
+ - Determines whether the config should be present or not
+ on the device.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+- nxos_ospf:
+ ospf: 1
+ state: present
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"ospf": "1"}
+existing:
+ description: k/v pairs of existing configuration
+ returned: verbose mode
+ type: dict
+ sample: {"ospf": ["2"]}
+end_state:
+ description: k/v pairs of configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"ospf": ["1", "2"]}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["router ospf 1"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+# COMMON CODE FOR MIGRATION
+import re
+
+import ansible.module_utils.nxos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.shell import ShellError
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+PARAM_TO_COMMAND_KEYMAP = {
+ 'ospf': 'router ospf'
+}
+
+
+def invoke(name, *args, **kwargs):
+ func = globals().get(name)
+ if func:
+ return func(*args, **kwargs)
+
+
+def get_value(config, module):
+ splitted_config = config.splitlines()
+ value_list = []
+ REGEX = '^router ospf\s(?P<ospf>\S+).*'
+ for line in splitted_config:
+ value = ''
+ if 'router ospf' in line:
+ try:
+ match_ospf = re.match(REGEX, line, re.DOTALL)
+ ospf_group = match_ospf.groupdict()
+ value = ospf_group['ospf']
+ except AttributeError:
+ value = ''
+ if value:
+ value_list.append(value)
+
+ return value_list
+
+
+def get_existing(module):
+ existing = {}
+ config = str(get_config(module))
+
+ value = get_value(config, module)
+ if value:
+ existing['ospf'] = value
+ return existing
+
+
+def apply_key_map(key_map, table):
+ new_dict = {}
+ for key, value in table.items():
+ new_key = key_map.get(key)
+ if new_key:
+ value = table.get(key)
+ if value:
+ new_dict[new_key] = value
+ else:
+ new_dict[new_key] = value
+ return new_dict
+
+
+def state_present(module, proposed, candidate):
+ commands = ['router ospf {0}'.format(proposed['ospf'])]
+ candidate.add(commands, parents=[])
+
+
+def state_absent(module, proposed, candidate):
+ commands = ['no router ospf {0}'.format(proposed['ospf'])]
+ candidate.add(commands, parents=[])
+
+
+def main():
+ argument_spec = dict(
+ ospf=dict(required=True, type='str'),
+ state=dict(choices=['present', 'absent'], default='present',
+ required=False),
+ include_defaults=dict(default=True),
+ config=dict(),
+ save=dict(type='bool', default=False)
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ state = module.params['state']
+ ospf = str(module.params['ospf'])
+
+ existing = invoke('get_existing', module)
+ end_state = existing
+ proposed = dict(ospf=ospf)
+
+ if not existing:
+ existing_list = []
+ else:
+ existing_list = existing['ospf']
+
+ result = {}
+ if (state == 'present' or (state == 'absent' and ospf in existing_list)):
+ candidate = CustomNetworkConfig(indent=3)
+ invoke('state_%s' % state, module, proposed, candidate)
+
+ try:
+ response = load_config(module, candidate)
+ result.update(response)
+ except ShellError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc))
+ else:
+ result['updates'] = []
+
+ result['connected'] = module.connected
+ if module._verbosity > 0:
+ end_state = invoke('get_existing', module)
+ result['end_state'] = end_state
+ result['existing'] = existing
+ result['proposed'] = proposed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_ospf_vrf.py b/network/nxos/nxos_ospf_vrf.py
new file mode 100644
index 00000000000..35e0d398f04
--- /dev/null
+++ b/network/nxos/nxos_ospf_vrf.py
@@ -0,0 +1,593 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_ospf_vrf
+version_added: "2.2"
+short_description: Manages a VRF for an OSPF router.
+description:
+ - Manages a VRF for an OSPF router.
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - Value I(default) restores params default value, if any.
+ Otherwise it removes the existing param configuration.
+options:
+ vrf:
+ description:
+ - Name of the resource instance. Valid value is a string.
+ The name 'default' is a valid VRF representing the global OSPF.
+ required: false
+ default: default
+ ospf:
+ description:
+ - Name of the OSPF instance.
+ required: true
+ default: null
+ router_id:
+ description:
+ - Router Identifier (ID) of the OSPF router VRF instance.
+ required: false
+ default: null
+ default_metric:
+ description:
+ - Specify the default Metric value. Valid values are an integer
+ or the keyword 'default'.
+ required: false
+ default: null
+ log_adjacency:
+ description:
+ - Controls the level of log messages generated whenever a
+ neighbor changes state. Valid values are 'log', 'detail',
+ and 'default'.
+ required: false
+ choices: ['log','detail','default']
+ default: null
+ timer_throttle_lsa_start:
+ description:
+ - Specify the start interval for rate-limiting Link-State
+ Advertisement (LSA) generation. Valid values are an integer,
+ in milliseconds, or the keyword 'default'.
+ required: false
+ default: null
+ timer_throttle_lsa_hold:
+ description:
+ - Specify the hold interval for rate-limiting Link-State
+ Advertisement (LSA) generation. Valid values are an integer,
+ in milliseconds, or the keyword 'default'.
+ required: false
+ default: null
+ timer_throttle_lsa_max:
+ description:
+ - Specify the max interval for rate-limiting Link-State
+ Advertisement (LSA) generation. Valid values are an integer,
+ in milliseconds, or the keyword 'default'.
+ required: false
+ default: null
+ timer_throttle_spf_start:
+ description:
+ - Specify initial Shortest Path First (SPF) schedule delay.
+ Valid values are an integer, in milliseconds, or
+ the keyword 'default'.
+ required: false
+ default: null
+ timer_throttle_spf_hold:
+ description:
+ - Specify minimum hold time between Shortest Path First (SPF)
+ calculations. Valid values are an integer, in milliseconds,
+ or the keyword 'default'.
+ required: false
+ default: null
+ timer_throttle_spf_max:
+ description:
+ - Specify the maximum wait time between Shortest Path First (SPF)
+ calculations. Valid values are an integer, in milliseconds,
+ or the keyword 'default'.
+ required: false
+ default: null
+ auto_cost:
+ description:
+ - Specifies the reference bandwidth used to assign OSPF cost.
+ Valid values are an integer, in Mbps, or the keyword 'default'.
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+- nxos_ospf_vrf:
+ ospf: 1
+ timer_throttle_spf_start: 50
+ timer_throttle_spf_hold: 1000
+ timer_throttle_spf_max: 2000
+ timer_throttle_lsa_start: 60
+ timer_throttle_lsa_hold: 1100
+ timer_throttle_lsa_max: 3000
+ vrf: test
+ state: present
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"ospf": "1", "timer_throttle_lsa_hold": "1100",
+ "timer_throttle_lsa_max": "3000", "timer_throttle_lsa_start": "60",
+ "timer_throttle_spf_hold": "1000",
+ "timer_throttle_spf_max": "2000", "timer_throttle_spf_start": "50",
+ "vrf": "test"}
+existing:
+ description: k/v pairs of existing configuration
+ returned: verbose mode
+ type: dict
+ sample: {"auto_cost": "40000", "default_metric": "", "log_adjacency": "",
+ "ospf": "1", "router_id": "", "timer_throttle_lsa_hold": "5000",
+ "timer_throttle_lsa_max": "5000", "timer_throttle_lsa_start": "0",
+ "timer_throttle_spf_hold": "1000",
+ "timer_throttle_spf_max": "5000",
+ "timer_throttle_spf_start": "200", "vrf": "test"}
+end_state:
+ description: k/v pairs of configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"auto_cost": "40000", "default_metric": "", "log_adjacency": "",
+ "ospf": "1", "router_id": "", "timer_throttle_lsa_hold": "1100",
+ "timer_throttle_lsa_max": "3000", "timer_throttle_lsa_start": "60",
+ "timer_throttle_spf_hold": "1000",
+ "timer_throttle_spf_max": "2000", "timer_throttle_spf_start": "50",
+ "vrf": "test"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["router ospf 1", "vrf test", "timers throttle lsa 60 1100 3000",
+ "timers throttle spf 50 1000 2000"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+# COMMON CODE FOR MIGRATION
+import re
+
+import ansible.module_utils.nxos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.shell import ShellError
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+PARAM_TO_COMMAND_KEYMAP = {
+ 'router_id': 'router-id',
+ 'default_metric': 'default-metric',
+ 'log_adjacency': 'log-adjacency-changes',
+ 'timer_throttle_lsa_start': 'timers throttle lsa',
+ 'timer_throttle_lsa_max': 'timers throttle lsa',
+ 'timer_throttle_lsa_hold': 'timers throttle lsa',
+ 'timer_throttle_spf_max': 'timers throttle spf',
+ 'timer_throttle_spf_start': 'timers throttle spf',
+ 'timer_throttle_spf_hold': 'timers throttle spf',
+ 'auto_cost': 'auto-cost reference-bandwidth'
+}
+PARAM_TO_DEFAULT_KEYMAP = {
+ 'timer_throttle_lsa_start': '0',
+ 'timer_throttle_lsa_max': '5000',
+ 'timer_throttle_lsa_hold': '5000',
+ 'timer_throttle_spf_start': '200',
+ 'timer_throttle_spf_max': '5000',
+ 'timer_throttle_spf_hold': '1000',
+ 'auto_cost': '40000'
+}
+
+
+def invoke(name, *args, **kwargs):
+ func = globals().get(name)
+ if func:
+ return func(*args, **kwargs)
+
+
+def get_value(arg, config, module):
+ REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
+ value = ''
+
+ if PARAM_TO_COMMAND_KEYMAP[arg] in config:
+ if arg == 'log_adjacency':
+ if 'log-adjacency-changes detail' in config:
+ value = 'detail'
+ else:
+ value = 'log'
+ else:
+ value_list = REGEX.search(config).group('value').split()
+ if 'hold' in arg:
+ value = value_list[1]
+ elif 'max' in arg:
+ value = value_list[2]
+ elif 'auto' in arg:
+ if 'Gbps' in value_list:
+ value = str(int(value_list[0]) * 1000)
+ else:
+ value = value_list[0]
+ else:
+ value = value_list[0]
+ return value
+
+
+def get_existing(module, args):
+ existing = {}
+ netcfg = get_config(module)
+ parents = ['router ospf {0}'.format(module.params['ospf'])]
+
+ if module.params['vrf'] != 'default':
+ parents.append('vrf {0}'.format(module.params['vrf']))
+
+ config = netcfg.get_section(parents)
+ if config:
+ if module.params['vrf'] == 'default':
+ splitted_config = config.splitlines()
+ vrf_index = False
+ for index in range(0, len(splitted_config) - 1):
+ if 'vrf' in splitted_config[index].strip():
+ vrf_index = index
+ break
+ if vrf_index:
+ config = '\n'.join(splitted_config[0:vrf_index])
+
+ for arg in args:
+ if arg not in ['ospf', 'vrf']:
+ existing[arg] = get_value(arg, config, module)
+
+ existing['vrf'] = module.params['vrf']
+ existing['ospf'] = module.params['ospf']
+
+ return existing
+
+
+def apply_key_map(key_map, table):
+ new_dict = {}
+ for key, value in table.items():
+ new_key = key_map.get(key)
+ if new_key:
+ value = table.get(key)
+ if value:
+ new_dict[new_key] = value
+ else:
+ new_dict[new_key] = value
+ return new_dict
+
+
+def state_present(module, existing, proposed, candidate):
+ commands = list()
+ proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
+ existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
+
+ for key, value in proposed_commands.iteritems():
+ if value is True:
+ commands.append(key)
+
+ elif value is False:
+ commands.append('no {0}'.format(key))
+
+ elif value == 'default':
+ if existing_commands.get(key):
+ existing_value = existing_commands.get(key)
+ commands.append('no {0} {1}'.format(key, existing_value))
+ else:
+ if key == 'timers throttle lsa':
+ command = '{0} {1} {2} {3}'.format(
+ key,
+ proposed['timer_throttle_lsa_start'],
+ proposed['timer_throttle_lsa_hold'],
+ proposed['timer_throttle_lsa_max'])
+ elif key == 'timers throttle spf':
+ command = '{0} {1} {2} {3}'.format(
+ key,
+ proposed['timer_throttle_spf_start'],
+ proposed['timer_throttle_spf_hold'],
+ proposed['timer_throttle_spf_max'])
+ elif key == 'log-adjacency-changes':
+ if value == 'log':
+ command = key
+ elif value == 'detail':
+ command = '{0} {1}'.format(key, value)
+ elif key == 'auto-cost reference-bandwidth':
+ if len(value) < 5:
+ command = '{0} {1} Mbps'.format(key, value)
+ else:
+ value = str(int(value) / 1000)
+ command = '{0} {1} Gbps'.format(key, value)
+ else:
+ command = '{0} {1}'.format(key, value.lower())
+
+ if command not in commands:
+ commands.append(command)
+
+ if commands:
+ parents = ['router ospf {0}'.format(module.params['ospf'])]
+ if module.params['vrf'] != 'default':
+ parents.append('vrf {0}'.format(module.params['vrf']))
+
+ candidate.add(commands, parents=parents)
+
+
+def state_absent(module, existing, proposed, candidate):
+ commands = []
+ parents = ['router ospf {0}'.format(module.params['ospf'])]
+ if module.params['vrf'] == 'default':
+ existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
+ for key, value in existing_commands.iteritems():
+ if value:
+ if key == 'timers throttle lsa':
+ command = 'no {0} {1} {2} {3}'.format(
+ key,
+ existing['timer_throttle_lsa_start'],
+ existing['timer_throttle_lsa_hold'],
+ existing['timer_throttle_lsa_max'])
+ elif key == 'timers throttle spf':
+ command = 'no {0} {1} {2} {3}'.format(
+ key,
+ existing['timer_throttle_spf_start'],
+ existing['timer_throttle_spf_hold'],
+ existing['timer_throttle_spf_max'])
+ else:
+ existing_value = existing_commands.get(key)
+ command = 'no {0} {1}'.format(key, existing_value)
+
+ if command not in commands:
+ commands.append(command)
+ else:
+ commands = ['no vrf {0}'.format(module.params['vrf'])]
+ candidate.add(commands, parents=parents)
+
+
+def main():
+ argument_spec = dict(
+ vrf=dict(required=False, type='str', default='default'),
+ ospf=dict(required=True, type='str'),
+ router_id=dict(required=False, type='str'),
+ default_metric=dict(required=False, type='str'),
+ log_adjacency=dict(required=False, type='str',
+ choices=['log', 'detail', 'default']),
+ timer_throttle_lsa_start=dict(required=False, type='str'),
+ timer_throttle_lsa_hold=dict(required=False, type='str'),
+ timer_throttle_lsa_max=dict(required=False, type='str'),
+ timer_throttle_spf_start=dict(required=False, type='str'),
+ timer_throttle_spf_hold=dict(required=False, type='str'),
+ timer_throttle_spf_max=dict(required=False, type='str'),
+ auto_cost=dict(required=False, type='str'),
+ state=dict(choices=['present', 'absent'], default='present',
+ required=False),
+ include_defaults=dict(default=True),
+ config=dict(),
+ save=dict(type='bool', default=False)
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ state = module.params['state']
+ args = [
+ 'vrf',
+ 'ospf',
+ 'router_id',
+ 'default_metric',
+ 'log_adjacency',
+ 'timer_throttle_lsa_start',
+ 'timer_throttle_lsa_hold',
+ 'timer_throttle_lsa_max',
+ 'timer_throttle_spf_start',
+ 'timer_throttle_spf_hold',
+ 'timer_throttle_spf_max',
+ 'auto_cost'
+ ]
+
+ existing = invoke('get_existing', module, args)
+ end_state = existing
+ proposed_args = dict((k, v) for k, v in module.params.iteritems()
+ if v is not None and k in args)
+
+ proposed = {}
+ for key, value in proposed_args.iteritems():
+ if key != 'interface':
+ if str(value).lower() == 'true':
+ value = True
+ elif str(value).lower() == 'false':
+ value = False
+ elif str(value).lower() == 'default':
+ value = PARAM_TO_DEFAULT_KEYMAP.get(key)
+ if value is None:
+ value = 'default'
+ if existing.get(key) or (not existing.get(key) and value):
+ proposed[key] = value
+
+ result = {}
+ if state == 'present' or (state == 'absent' and existing):
+ candidate = CustomNetworkConfig(indent=3)
+ invoke('state_%s' % state, module, existing, proposed, candidate)
+
+ try:
+ response = load_config(module, candidate)
+ result.update(response)
+ except ShellError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc))
+ else:
+ result['updates'] = []
+
+ result['connected'] = module.connected
+ if module._verbosity > 0:
+ end_state = invoke('get_existing', module, args)
+ result['end_state'] = end_state
+ result['existing'] = existing
+ result['proposed'] = proposed_args
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_overlay_global.py b/network/nxos/nxos_overlay_global.py
new file mode 100644
index 00000000000..5b7d89c4b29
--- /dev/null
+++ b/network/nxos/nxos_overlay_global.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_overlay_global
+version_added: "2.2"
+short_description: Configures anycast gateway MAC of the switch.
+description:
+ - Configures anycast gateway MAC of the switch.
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - Default restores params default value
+ - Supported MAC address format are "E.E.E", "EE-EE-EE-EE-EE-EE",
+ "EE:EE:EE:EE:EE:EE" and "EEEE.EEEE.EEEE"
+options:
+ anycast_gateway_mac:
+ description:
+ - Anycast gateway mac of the switch.
+ required: true
+ default: null
+'''
+
+EXAMPLES = '''
+- nxos_overlay_global:
+ anycast_gateway_mac: "b.b.b"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"asn": "65535", "router_id": "1.1.1.1", "vrf": "test"}
+existing:
+ description: k/v pairs of existing BGP configuration
+ returned: verbose mode
+ type: dict
+ sample: {"asn": "65535", "bestpath_always_compare_med": false,
+ "bestpath_aspath_multipath_relax": false,
+ "bestpath_compare_neighborid": false,
+ "bestpath_compare_routerid": false,
+ "bestpath_cost_community_ignore": false,
+ "bestpath_med_confed": false,
+ "bestpath_med_missing_as_worst": false,
+ "bestpath_med_non_deterministic": false, "cluster_id": "",
+ "confederation_id": "", "confederation_peers": "",
+ "graceful_restart": true, "graceful_restart_helper": false,
+ "graceful_restart_timers_restart": "120",
+ "graceful_restart_timers_stalepath_time": "300", "local_as": "",
+ "log_neighbor_changes": false, "maxas_limit": "",
+ "neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
+ "router_id": "11.11.11.11", "suppress_fib_pending": false,
+ "timer_bestpath_limit": "", "timer_bgp_hold": "180",
+ "timer_bgp_keepalive": "60", "vrf": "test"}
+end_state:
+ description: k/v pairs of BGP configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"asn": "65535", "bestpath_always_compare_med": false,
+ "bestpath_aspath_multipath_relax": false,
+ "bestpath_compare_neighborid": false,
+ "bestpath_compare_routerid": false,
+ "bestpath_cost_community_ignore": false,
+ "bestpath_med_confed": false,
+ "bestpath_med_missing_as_worst": false,
+ "bestpath_med_non_deterministic": false, "cluster_id": "",
+ "confederation_id": "", "confederation_peers": "",
+ "graceful_restart": true, "graceful_restart_helper": false,
+ "graceful_restart_timers_restart": "120",
+ "graceful_restart_timers_stalepath_time": "300", "local_as": "",
+ "log_neighbor_changes": false, "maxas_limit": "",
+ "neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
+ "router_id": "1.1.1.1", "suppress_fib_pending": false,
+ "timer_bestpath_limit": "", "timer_bgp_hold": "180",
+ "timer_bgp_keepalive": "60", "vrf": "test"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device configuration as a CustomNetworkConfig.

    Prefers the user-supplied 'config' module parameter; otherwise
    fetches the running config from the device.

    NOTE(review): the include_defaults argument is never used; the
    'include_defaults' module parameter is read instead.
    """
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            # newer module object exposes config via module.config
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
+
def load_config(module, candidate):
    """Push the difference between *candidate* and the running config.

    Returns a dict with 'changed' and, when commands were generated,
    'updates' (the command list).  Honours check mode and the 'save'
    module parameter.
    """
    config = get_config(module)

    # only send lines that differ from the running configuration
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            try:
                module.configure(commands)
            except AttributeError:
                # newer module object uses module.config()
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
+PARAM_TO_COMMAND_KEYMAP = {
+ 'anycast_gateway_mac': 'fabric forwarding anycast-gateway-mac',
+}
+
+
def invoke(name, *args, **kwargs):
    """Look up *name* in this module's globals and call it when bound
    to a truthy object; return its result, or None otherwise."""
    target = globals().get(name)
    return target(*args, **kwargs) if target else None
+
+
def get_value(arg, config, module):
    """Extract the configured value for *arg* from the running config.

    Builds a regex from the CLI keyword mapped to *arg* and returns the
    text following it on the matching line, or '' when the command is
    absent from *config*.  The module argument is unused here but kept
    for signature parity with the other nxos helpers.
    """
    command = PARAM_TO_COMMAND_KEYMAP[arg]
    # Bug fix: the capture group must be NAMED 'value' for the
    # .group('value') call below; the previous pattern '(?P.*)' is not
    # even a valid regular expression (the '<value>' was lost).
    REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
    value = ''
    if command in config:
        value = REGEX.search(config).group('value')
    return value
+
+
def get_existing(module, args):
    """Return a dict mapping each parameter name in *args* to its
    currently configured value ('' when not configured)."""
    existing = {}
    config = str(get_config(module))

    for arg in args:
        existing[arg] = get_value(arg, config, module)
    return existing
+
+
def apply_key_map(key_map, table):
    """Return a copy of *table* with its keys renamed through *key_map*.

    Entries whose key has no truthy mapping in *key_map* are dropped;
    values are carried over unchanged, including falsy ones.
    """
    return dict(
        (key_map[key], value)
        for key, value in table.items()
        if key_map.get(key)
    )
+
+
def get_commands(module, existing, proposed, candidate):
    """Translate proposed vs. existing parameter values into CLI
    commands and add them to *candidate* at the global level.

    A proposed value of 'default' emits 'no <cmd> <current value>' when
    a value is configured; any other value is emitted as-is (MAC
    addresses are normalized to dotted form first).
    """
    commands = list()
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)

    for key, value in proposed_commands.iteritems():
        if value == 'default':
            existing_value = existing_commands.get(key)
            if existing_value:
                commands.append('no {0} {1}'.format(key, existing_value))
        else:
            if 'anycast-gateway-mac' in key:
                # canonicalize to XXXX.XXXX.XXXX before sending
                value = normalize_mac(value, module)
            command = '{0} {1}'.format(key, value)
            commands.append(command)

    if commands:
        candidate.add(commands, parents=[])
+
+
def normalize_mac(proposed_mac, module):
    """Validate *proposed_mac* and return it in canonical dotted form.

    Accepts dash- (xx-xx-xx-xx-xx-xx), colon- (xx:xx:xx:xx:xx:xx) or
    dot-separated (xxxx.xxxx.xxxx) notation and returns the address as
    upper-cased 'XXXX.XXXX.XXXX'.  Calls module.fail_json() on any
    malformed input.
    """
    try:
        if '-' in proposed_mac:
            splitted_mac = proposed_mac.split('-')
            if len(splitted_mac) != 6:
                raise ValueError

            for octect in splitted_mac:
                if len(octect) != 2:
                    raise ValueError

        elif '.' in proposed_mac:
            splitted_mac = []
            splitted_dot_mac = proposed_mac.split('.')
            if len(splitted_dot_mac) != 3:
                raise ValueError

            for octect in splitted_dot_mac:
                if len(octect) > 4:
                    raise ValueError
                else:
                    # Bug fix: each dotted group must be zero-padded to a
                    # fixed width of 4 so the regrouping below yields
                    # three 4-character groups.  The old
                    # octect.zfill(padding + 1) padded to 5 - len(octect)
                    # characters, producing a malformed address for 2- and
                    # 3-character groups.
                    splitted_mac.append(octect.zfill(4))

        elif ':' in proposed_mac:
            splitted_mac = proposed_mac.split(':')
            if len(splitted_mac) != 6:
                raise ValueError

            for octect in splitted_mac:
                if len(octect) != 2:
                    raise ValueError
        else:
            raise ValueError
    except ValueError:
        module.fail_json(msg='Invalid MAC address format',
                         proposed_mac=proposed_mac)

    joined_mac = ''.join(splitted_mac)
    # regroup into three 4-character chunks, dot-separated, upper-cased
    mac = [joined_mac[i:i+4] for i in range(0, len(joined_mac), 4)]
    return '.'.join(mac).upper()
+
+
def main():
    """Module entry point: gather current state, build the candidate
    configuration and apply the difference to the device."""
    argument_spec = dict(
        anycast_gateway_mac=dict(required=True, type='str'),
        m_facts=dict(required=False, default=False, type='bool'),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    args = [
        'anycast_gateway_mac'
    ]

    existing = invoke('get_existing', module, args)
    end_state = existing
    proposed = dict((k, v) for k, v in module.params.iteritems()
                    if v is not None and k in args)

    result = {}
    candidate = CustomNetworkConfig(indent=3)
    invoke('get_commands', module, existing, proposed, candidate)

    try:
        response = load_config(module, candidate)
        result.update(response)
    except ShellError:
        exc = get_exception()
        module.fail_json(msg=str(exc))

    result['connected'] = module.connected
    if module._verbosity > 0:
        # refresh facts and include diagnostic detail in verbose runs
        end_state = invoke('get_existing', module, args)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed

    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_pim.py b/network/nxos/nxos_pim.py
new file mode 100644
index 00000000000..659686ceaa2
--- /dev/null
+++ b/network/nxos/nxos_pim.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_pim
+version_added: "2.2"
+short_description: Manages configuration of a PIM instance.
+description:
+ - Manages configuration of a Protocol Independent Multicast (PIM) instance.
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+options:
+ ssm_range:
+ description:
+ - Configure group ranges for Source Specific Multicast (SSM).
+ Valid values are multicast addresses or the keyword 'none'.
+ required: true
+'''
+EXAMPLES = '''
+- nxos_pim:
+ ssm_range: "232.0.0.0/8"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"ssm_range": "232.0.0.0/8"}
+existing:
+ description: k/v pairs of existing PIM configuration
+ returned: verbose mode
+ type: dict
+ sample: {"ssm_range": none}
+end_state:
+ description: k/v pairs of BGP configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"ssm_range": "232.0.0.0/8"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["ip pim ssm range 232.0.0.0/8"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Coerce *val* to a list: lists/tuples are copied, None becomes an
    empty list, and any other value is wrapped in a one-element list."""
    if val is None:
        return list()
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass adding section-oriented lookup helpers
    and an add() that creates parent sections on demand."""

    def expand_section(self, configobj, S=None):
        """Return *configobj* plus all of its descendants, depth-first.

        S accumulates visited ConfigLine objects; lines already in S are
        not descended into again.
        """
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        """Return the ConfigLine whose text equals path[-1] and whose
        ancestry matches path[:-1]; implicitly None when not found."""
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        """Join the raw text of each ConfigLine in *section* into one
        newline-separated string."""
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        """Return the raw text of the section at *path*; an empty list
        when the path is not present."""
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        """Return the ConfigLine objects for the section at *path*;
        raises ValueError when the path does not exist."""
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more configuration lines, optionally nested under
        *parents* (a list of section texts, outermost first).

        Missing parent sections are created; existing lines are not
        duplicated.
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                # skip duplicates of existing global lines
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    # reuse an existing section object when present
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    # indent the new line under the innermost parent
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the platform module object.

    Uses the legacy get_module() factory when that name was importable;
    otherwise falls back to the NetworkModule class.
    """
    try:
        return get_module(**kwargs)
    except NameError:
        # get_module was not imported; use the newer API
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device configuration as a CustomNetworkConfig.

    Prefers the user-supplied 'config' module parameter; otherwise
    fetches the running config from the device.

    NOTE(review): the include_defaults argument is never used; the
    'include_defaults' module parameter is read instead.
    """
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            # newer module object exposes config via module.config
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
+
def load_config(module, candidate):
    """Push the difference between *candidate* and the running config.

    Returns a dict with 'changed' and, when commands were generated,
    'updates' (the command list).  Honours check mode and the 'save'
    module parameter.
    """
    config = get_config(module)

    # only send lines that differ from the running configuration
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            try:
                module.configure(commands)
            except AttributeError:
                # newer module object uses module.config()
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
+
+PARAM_TO_COMMAND_KEYMAP = {
+ 'ssm_range': 'ip pim ssm range'
+}
+PARAM_TO_DEFAULT_KEYMAP = {}
+WARNINGS = []
+
+
def invoke(name, *args, **kwargs):
    """Look up *name* in this module's globals and call it when bound
    to a truthy object; return its result, or None otherwise."""
    target = globals().get(name)
    return target(*args, **kwargs) if target else None
+
+
def get_value(arg, config, module):
    """Extract the configured value for *arg* from the running config.

    Builds a regex from the CLI keyword mapped to *arg* and returns the
    text following it on the matching line, or '' when the command is
    absent from *config*.  The module argument is unused here but kept
    for signature parity with the other nxos helpers.
    """
    command = PARAM_TO_COMMAND_KEYMAP[arg]
    # Bug fix: the capture group must be NAMED 'value' for the
    # .group('value') call below; the previous pattern '(?P.*)' is not
    # even a valid regular expression (the '<value>' was lost).
    REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
    value = ''
    if command in config:
        value = REGEX.search(config).group('value')
    return value
+
+
def get_existing(module, args):
    """Return a dict mapping each parameter name in *args* to its
    currently configured value ('' when not configured)."""
    existing = {}
    config = str(get_config(module))
    for arg in args:
        existing[arg] = get_value(arg, config, module)
    return existing
+
+
def apply_key_map(key_map, table):
    """Return a copy of *table* with its keys renamed through *key_map*.

    Entries whose key has no truthy mapping in *key_map* are dropped;
    values are carried over unchanged, including falsy ones.
    """
    return dict(
        (key_map[key], value)
        for key, value in table.items()
        if key_map.get(key)
    )
+
+
def get_commands(module, existing, proposed, candidate):
    """Render each proposed parameter into its CLI command and add the
    batch to *candidate* at the global (parentless) level.

    NOTE(review): existing_commands is computed but never used in this
    module's version of the helper.
    """
    commands = list()
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)

    for key, value in proposed_commands.iteritems():
        command = '{0} {1}'.format(key, value)
        commands.append(command)

    if commands:
        candidate.add(commands, parents=[])
+
+
def main():
    """Module entry point: validate ssm_range, compute the candidate
    configuration and push the difference to the device."""
    argument_spec = dict(
        ssm_range=dict(required=True, type='str'),
        m_facts=dict(required=False, default=False, type='bool'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    # coarse validation only: four dot-separated fields, or the literal
    # keyword 'none'
    splitted_ssm_range = module.params['ssm_range'].split('.')
    if len(splitted_ssm_range) != 4 and module.params['ssm_range'] != 'none':
        module.fail_json(msg="Valid ssm_range values are multicast addresses "
                             "or the keyword 'none'.")

    args = [
        'ssm_range'
    ]

    existing = invoke('get_existing', module, args)
    end_state = existing
    proposed = dict((k, v) for k, v in module.params.iteritems()
                    if v is not None and k in args)

    result = {}
    candidate = CustomNetworkConfig(indent=3)
    invoke('get_commands', module, existing, proposed, candidate)

    try:
        response = load_config(module, candidate)
        result.update(response)
    except ShellError:
        exc = get_exception()
        module.fail_json(msg=str(exc))

    result['connected'] = module.connected
    if module._verbosity > 0:
        # refresh facts and include diagnostic detail in verbose runs
        end_state = invoke('get_existing', module, args)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed

    if WARNINGS:
        result['warnings'] = WARNINGS

    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_pim_interface.py b/network/nxos/nxos_pim_interface.py
new file mode 100644
index 00000000000..119785d47ed
--- /dev/null
+++ b/network/nxos/nxos_pim_interface.py
@@ -0,0 +1,932 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_pim_interface
+version_added: "2.2"
+short_description: Manages PIM interface configuration.
+description:
+ - Manages PIM interface configuration settings.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+notes:
+ - When C(state=default), supported params will be reset to a default state.
+ These include C(dr_prio), C(hello_auth_key), C(hello_interval), C(jp_policy_out),
+ C(jp_policy_in), C(jp_type_in), C(jp_type_out), C(border), C(neighbor_policy),
+ C(neighbor_type).
+ - The C(hello_auth_key) param is not idempotent.
+ - C(hello_auth_key) only supports clear text passwords.
+ - When C(state=absent), pim interface configuration will be set to defaults and pim-sm
+ will be disabled on the interface.
+ - PIM must be enabled on the device to use this module.
+ - This module is for Layer 3 interfaces.
+options:
+ interface:
+ description:
+ - Full name of the interface such as Ethernet1/33.
+ required: true
+ sparse:
+ description:
+ - Enable/disable sparse-mode on the interface.
+ required: false
+ default: true
+ choices: ['true', 'false']
+ hello_auth_key:
+ description:
+ - Authentication for hellos on this interface.
+ required: false
+ default: null
+ hello_interval:
+ description:
+ - Hello interval in milliseconds for this interface.
+ required: false
+ default: null
+ choices: ['true', 'false']
+ jp_policy_out:
+ description:
+ - Policy for join-prune messages (outbound).
+ required: true
+ default: null
+ jp_policy_in:
+ description:
+ - Policy for join-prune messages (inbound).
+ required: false
+ default: null
+ jp_type_out:
+ description:
+ - Type of policy mapped to C(jp_policy_out).
+ required: false
+ default: null
+ choices: ['prefix', 'routemap']
+ jp_type_in:
+ description:
+ - Type of policy mapped to C(jp_policy_in).
+ required: false
+ default: null
+ choices: ['prefix', 'routemap']
+ border:
+ description:
+ - Configures interface to be a boundary of a PIM domain.
+ required: false
+ default: null
+ choices: ['true', 'false']
+ neighbor_policy:
+ description:
+ - Configures a neighbor policy for filtering adjacencies.
+ required: false
+ default: null
+ neighbor_type:
+ description:
+ - Type of policy mapped to neighbor_policy.
+ required: false
+ default: null
+ choices: ['prefix', 'routemap']
+ state:
+ description:
+ - Manages desired state of the resource.
+ required: false
+ default: present
+ choices: ['present', 'default']
+'''
+EXAMPLES = '''
+# ensure PIM is not running on the interface
+- nxos_pim_interface:
+ interface: eth1/33
+ state: absent
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# ensure the interface has pim-sm enabled with the appropriate priority and hello interval
+- nxos_pim_interface:
+ interface: eth1/33
+ dr_prio: 10
+ hello_interval: 40
+ state: present
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# ensure join-prune policies exist
+- nxos_pim_interface:
+ interface: eth1/33
+ jp_policy_in: JPIN
+ jp_policy_out: JPOUT
+ jp_type_in: routemap
+ jp_type_out: routemap
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# ensure defaults are in place
+- nxos_pim_interface:
+ interface: eth1/33
+ state: default
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"interface": "eth1/33", "neighbor_policy": "test",
+ "neighbor_type": "routemap", "sparse": true}
+existing:
+ description:
+ - k/v pairs of existing configuration
+ type: dict
+ sample: {"border": false, "dr_prio": "1", "hello_interval": "30000",
+ "isauth": false, "jp_bidir": false, "jp_policy_in": "JPIN",
+ "jp_policy_out": "1", "jp_type_in": "routemap",
+ "jp_type_out": null, "neighbor_policy": "test1",
+ "neighbor_type": "prefix", "sparse": true}
+end_state:
+ description: k/v pairs of configuration after module execution
+ returned: always
+ type: dict
+ sample: {"border": false, "dr_prio": "1", "hello_interval": "30000",
+ "isauth": false, "jp_bidir": false, "jp_policy_in": "JPIN",
+ "jp_policy_out": "1", "jp_type_in": "routemap",
+ "jp_type_out": null, "neighbor_policy": "test",
+ "neighbor_type": "routemap", "sparse": true}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["interface eth1/33", "ip pim neighbor-policy test",
+ "ip pim neighbor-policy test"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+import json
+import time
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Coerce *val* to a list: lists/tuples are copied, None becomes an
    empty list, and any other value is wrapped in a one-element list."""
    if val is None:
        return list()
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass adding section-oriented lookup helpers
    and an add() that creates parent sections on demand."""

    def expand_section(self, configobj, S=None):
        """Return *configobj* plus all of its descendants, depth-first.

        S accumulates visited ConfigLine objects; lines already in S are
        not descended into again.
        """
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        """Return the ConfigLine whose text equals path[-1] and whose
        ancestry matches path[:-1]; implicitly None when not found."""
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        """Join the raw text of each ConfigLine in *section* into one
        newline-separated string."""
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        """Return the raw text of the section at *path*; an empty list
        when the path is not present."""
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        """Return the ConfigLine objects for the section at *path*;
        raises ValueError when the path does not exist."""
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more configuration lines, optionally nested under
        *parents* (a list of section texts, outermost first).

        Missing parent sections are created; existing lines are not
        duplicated.
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                # skip duplicates of existing global lines
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    # reuse an existing section object when present
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    # indent the new line under the innermost parent
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the platform module object.

    Uses the legacy get_module() factory when that name was importable;
    otherwise falls back to the NetworkModule class.
    """
    try:
        return get_module(**kwargs)
    except NameError:
        # get_module was not imported; use the newer API
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device configuration as a CustomNetworkConfig.

    Prefers the user-supplied 'config' module parameter; otherwise
    fetches the running config from the device.

    NOTE(review): the include_defaults argument is never used; the
    'include_defaults' module parameter is read instead.
    """
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            # newer module object exposes config via module.config
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
+
def load_config(module, candidate):
    """Push the difference between *candidate* and the running config.

    Returns a dict with 'changed' and, when commands were generated,
    'updates' (the command list).  Honours check mode and the 'save'
    module parameter.
    """
    config = get_config(module)

    # only send lines that differ from the running configuration
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            try:
                module.configure(commands)
            except AttributeError:
                # newer module object uses module.config()
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
+
def execute_config_command(commands, module):
    """Send *commands* to the device in configuration mode.

    Tries the legacy module.configure() API first; when that attribute
    is missing, falls back to the cli runner (prefixing 'configure').
    Fails the module with the CLI error text on ShellError.
    """
    try:
        module.configure(commands)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(clie), commands=commands)
    except AttributeError:
        # module has no configure(); use the cli runner instead
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(clie), commands=commands)
+
+
def get_cli_body_ssh(command, response, module, text=False):
    """Normalize a cli/ssh response.

    These modules were originally written for NX-API and not every
    command supports '| json' over cli/ssh: an XML-/error-looking first
    element means the resource does not exist yet (empty list), output
    of 'show run' commands and text-mode requests pass through
    untouched, and anything else must parse as JSON or the module
    fails.
    """
    first = response[0]
    if 'xml' in first or first == '\n' or '^' in first:
        return []
    if text or 'show run' in command:
        return response
    try:
        return [json.loads(first)]
    except ValueError:
        module.fail_json(msg='Command does not support JSON output',
                         command=command)
+
+
def execute_show(cmds, module, command_type=None):
    """Run show commands and return the raw response list.

    Tries the legacy module.execute() API first; when that attribute is
    missing, falls back to the cli runner.  command_type is the legacy
    'cli_show'/'cli_show_ascii' selector and is translated to the cli
    runner's 'json'/'text' output names.
    """
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        # module has no execute(); use the cli runner instead
        try:
            if command_type:
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                module.cli.add_commands(cmds, raw=True)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
+
+
def execute_show_command(command, module, command_type='cli_show', text=False):
    """Run one show command and return its parsed body.

    For cli transport, appends ' | json' (except for 'show run' or text
    mode) and post-processes with get_cli_body_ssh(); for nxapi the
    device returns structured output directly.
    """
    if module.params['transport'] == 'cli':
        if 'show run' not in command and text is False:
            command += ' | json'
        cmds = [command]
        response = execute_show(cmds, module)
        body = get_cli_body_ssh(command, response, module, text=text)
    elif module.params['transport'] == 'nxapi':
        cmds = [command]
        body = execute_show(cmds, module, command_type=command_type)

    # NOTE(review): an unexpected transport value would leave 'body'
    # unbound and raise UnboundLocalError here.
    return body
+
+
def flatten_list(command_lists):
    """Flatten one level of nesting: list elements are spliced in place,
    scalar elements are kept as-is."""
    flattened = []
    for entry in command_lists:
        if isinstance(entry, list):
            flattened += entry
        else:
            flattened += [entry]
    return flattened
+
+
def local_existing(gexisting):
    """Split gathered PIM interface facts into (facts, jp_bidir, isauth).

    When both jp_bidir and isauth are truthy, those keys are removed
    from the facts dict and 'sparse' is forced to True.
    """
    jp_bidir, isauth = False, False
    if not gexisting:
        return gexisting, jp_bidir, isauth

    jp_bidir = gexisting.get('jp_bidir')
    isauth = gexisting.get('isauth')
    if jp_bidir and isauth:
        del gexisting['jp_bidir']
        del gexisting['isauth']
        gexisting['sparse'] = True
    return gexisting, jp_bidir, isauth
+
+
def get_interface_type(interface):
    """Classify an interface name by its case-insensitive prefix.

    Returns one of 'ethernet', 'svi', 'loopback', 'management',
    'portchannel' or 'unknown'.
    """
    prefix_table = (
        ('ET', 'ethernet'),
        ('VL', 'svi'),
        ('LO', 'loopback'),
        ('MG', 'management'),
        ('MA', 'management'),
        ('PO', 'portchannel'),
    )
    name = interface.upper()
    for prefix, intf_type in prefix_table:
        if name.startswith(prefix):
            return intf_type
    return 'unknown'
+
+
def get_interface_mode(interface, intf_type, module):
    """Return 'layer2', 'layer3' or 'unknown' for *interface*.

    Loopback and SVI interfaces are always layer3; ethernet and
    port-channel interfaces are classified from the 'eth_mode' field of
    'show interface' output ('access'/'trunk' -> layer2, 'routed' ->
    layer3).
    """
    command = 'show interface {0}'.format(interface)
    mode = 'unknown'
    interface_table = {}
    body = execute_show_command(command, module)

    try:
        interface_table = body[0]['TABLE_interface']['ROW_interface']
    except (KeyError, AttributeError, IndexError):
        # unexpected or empty response; report unknown
        return mode

    if intf_type in ['ethernet', 'portchannel']:
        mode = str(interface_table.get('eth_mode', 'layer3'))
        if mode in ['access', 'trunk']:
            mode = 'layer2'
        elif mode == 'routed':
            mode = 'layer3'
    elif intf_type in ['loopback', 'svi']:
        mode = 'layer3'
    return mode
+
+
def get_pim_interface(module, interface):
    """Collect PIM settings for *interface* from the device.

    Combines structured 'show ip pim interface' output with the raw
    'show run interface' text (used to derive jp-policy and
    neighbor-policy types) and returns a dict of facts; returns {} when
    the structured output cannot be parsed.
    """
    pim_interface = {}
    command = 'show ip pim interface {0}'.format(interface)

    body = execute_show_command(command, module,
                                command_type='cli_show_ascii', text=True)

    if body:
        if 'not running' not in body[0]:
            # PIM is enabled; re-issue the command for structured output
            body = execute_show_command(command, module)

    try:
        get_data = body[0]['TABLE_iod']['ROW_iod']

        # NOTE: 'unicode' is Python 2 only; values may come back either
        # as scalars or as one-element lists
        if isinstance(get_data.get('dr-priority'), unicode) or \
                isinstance(get_data.get('dr-priority'), str):
            pim_interface['dr_prio'] = get_data.get('dr-priority')
        else:
            pim_interface['dr_prio'] = get_data.get('dr-priority')[0]

        hello_interval = get_data.get('hello-interval-sec')
        if hello_interval:
            # device reports seconds; the module API uses milliseconds
            hello_interval_msec = int(get_data.get('hello-interval-sec'))*1000
            pim_interface['hello_interval'] = str(hello_interval_msec)
        border = get_data.get('is-border')

        if border == 'true':
            pim_interface['border'] = True
        elif border == 'false':
            pim_interface['border'] = False

        isauth = get_data.get('isauth-config')
        if isauth == 'true':
            pim_interface['isauth'] = True
        elif isauth == 'false':
            pim_interface['isauth'] = False

        pim_interface['neighbor_policy'] = get_data.get('nbr-policy-name')
        if pim_interface['neighbor_policy'] == 'none configured':
            pim_interface['neighbor_policy'] = None

        jp_in_policy = get_data.get('jp-in-policy-name')
        pim_interface['jp_policy_in'] = jp_in_policy
        if jp_in_policy == 'none configured':
            pim_interface['jp_policy_in'] = None

        if isinstance(get_data.get('jp-out-policy-name'), unicode) or \
                isinstance(get_data.get('jp-out-policy-name'), str):
            pim_interface['jp_policy_out'] = get_data.get('jp-out-policy-name')
        else:
            pim_interface['jp_policy_out'] = get_data.get(
                'jp-out-policy-name')[0]

        if pim_interface['jp_policy_out'] == 'none configured':
            pim_interface['jp_policy_out'] = None

    except (KeyError, AttributeError, TypeError, IndexError):
        # interface has no PIM data / unexpected structure
        return {}

    command = 'show run interface {0}'.format(interface)

    body = execute_show_command(command, module, command_type='cli_show_ascii')

    # scan the raw interface config for policy lines
    jp_configs = []
    neigh = None
    if body:
        all_lines = body[0].splitlines()

        for each in all_lines:
            if 'jp-policy' in each:
                jp_configs.append(str(each.strip()))
            elif 'neighbor-policy' in each:
                neigh = str(each)

    pim_interface['neighbor_type'] = None
    neigh_type = None
    if neigh:
        if 'prefix-list' in neigh:
            neigh_type = 'prefix'
        else:
            neigh_type = 'routemap'
    pim_interface['neighbor_type'] = neigh_type

    len_existing = len(jp_configs)
    list_of_prefix_type = len([x for x in jp_configs if 'prefix-list' in x])
    jp_type_in = None
    jp_type_out = None
    jp_bidir = False
    if len_existing == 1:
        # determine type
        last_word = jp_configs[0].split(' ')[-1]
        if last_word == 'in':
            if list_of_prefix_type:
                jp_type_in = 'prefix'
            else:
                jp_type_in = 'routemap'
        elif last_word == 'out':
            if list_of_prefix_type:
                jp_type_out = 'prefix'
            else:
                jp_type_out = 'routemap'
        else:
            # no trailing in/out keyword: the policy is bidirectional
            jp_bidir = True
            if list_of_prefix_type:
                jp_type_in = 'prefix'
                jp_type_out = 'routemap'
            else:
                jp_type_in = 'routemap'
                jp_type_out = 'routemap'
    else:
        for each in jp_configs:
            last_word = each.split(' ')[-1]
            if last_word == 'in':
                if 'prefix-list' in each:
                    jp_type_in = 'prefix'
                else:
                    jp_type_in = 'routemap'
            elif last_word == 'out':
                if 'prefix-list' in each:
                    jp_type_out = 'prefix'
                else:
                    jp_type_out = 'routemap'

    pim_interface['jp_type_in'] = jp_type_in
    pim_interface['jp_type_out'] = jp_type_out
    pim_interface['jp_bidir'] = jp_bidir

    return pim_interface
+
+
def fix_delta(delta, existing):
    """Remove a 'sparse': False entry from *delta* when *existing* has
    no 'sparse' value at all; return the (possibly mutated) delta."""
    sparse_disabled = delta.get('sparse') is False
    never_configured = existing.get('sparse') is None
    if sparse_disabled and never_configured:
        del delta['sparse']
    return delta
+
+
def config_pim_interface(delta, existing, jp_bidir, isauth):
    """Build the CLI commands that move the interface from *existing*
    to the state described by *delta*.

    jp_bidir / isauth describe the current bidirectional jp-policy and
    hello-authentication state and drive the generated 'no' forms.
    Returns the list of commands (may be empty).
    """
    command = None
    commands = []

    delta = fix_delta(delta, existing)

    # per-parameter command templates; empty entries are resolved in
    # the type-specific branches below
    CMDS = {
        'sparse': 'ip pim sparse-mode',
        'dr_prio': 'ip pim dr-priority {0}',
        'hello_interval': 'ip pim hello-interval {0}',
        'hello_auth_key': 'ip pim hello-authentication ah-md5 {0}',
        'border': 'ip pim border',
        'jp_policy_out': 'ip pim jp-policy prefix-list {0} out',
        'jp_policy_in': 'ip pim jp-policy prefix-list {0} in',
        'jp_type_in': '',
        'jp_type_out': '',
        'neighbor_policy': 'ip pim neighbor-policy prefix-list {0}',
        'neighbor_type': ''
    }

    if jp_bidir:
        # an existing bidirectional jp-policy must be removed before
        # direction-specific policies can be applied
        if delta.get('jp_policy_in') or delta.get('jp_policy_out'):
            if existing.get('jp_type_in') == 'prefix':
                command = 'no ip pim jp-policy prefix-list {0}'.format(
                    existing.get('jp_policy_in')
                )
            else:
                command = 'no ip pim jp-policy {0}'.format(
                    existing.get('jp_policy_in')
                )
        if command:
            commands.append(command)

    for k, v in delta.iteritems():
        if k in ['dr_prio', 'hello_interval', 'hello_auth_key', 'border',
                 'sparse']:
            if v:
                command = CMDS.get(k).format(v)
            elif k == 'hello_auth_key':
                # auth key can only be removed when currently configured
                if isauth:
                    command = 'no ip pim hello-authentication ah-md5'
            else:
                command = 'no ' + CMDS.get(k).format(v)

            if command:
                commands.append(command)
        elif k in ['neighbor_policy', 'jp_policy_in', 'jp_policy_out',
                   'neighbor_type']:
            # policy commands: the template depends on whether the
            # (proposed or existing) policy type is prefix-list/routemap
            if k in ['neighbor_policy', 'neighbor_type']:
                temp = delta.get('neighbor_policy') or existing.get(
                    'neighbor_policy')
                if delta.get('neighbor_type') == 'prefix':
                    command = CMDS.get(k).format(temp)
                elif delta.get('neighbor_type') == 'routemap':
                    command = 'ip pim neighbor-policy {0}'.format(temp)
                elif existing.get('neighbor_type') == 'prefix':
                    command = CMDS.get(k).format(temp)
                elif existing.get('neighbor_type') == 'routemap':
                    command = 'ip pim neighbor-policy {0}'.format(temp)
            elif k in ['jp_policy_in', 'jp_type_in']:
                temp = delta.get('jp_policy_in') or existing.get(
                    'jp_policy_in')
                if delta.get('jp_type_in') == 'prefix':
                    command = CMDS.get(k).format(temp)
                elif delta.get('jp_type_in') == 'routemap':
                    command = 'ip pim jp-policy {0} in'.format(temp)
                elif existing.get('jp_type_in') == 'prefix':
                    command = CMDS.get(k).format(temp)
                elif existing.get('jp_type_in') == 'routemap':
                    command = 'ip pim jp-policy {0} in'.format(temp)
            elif k in ['jp_policy_out', 'jp_type_out']:
                temp = delta.get('jp_policy_out') or existing.get(
                    'jp_policy_out')
                if delta.get('jp_type_out') == 'prefix':
                    command = CMDS.get(k).format(temp)
                elif delta.get('jp_type_out') == 'routemap':
                    command = 'ip pim jp-policy {0} out'.format(temp)
                elif existing.get('jp_type_out') == 'prefix':
                    command = CMDS.get(k).format(temp)
                elif existing.get('jp_type_out') == 'routemap':
                    command = 'ip pim jp-policy {0} out'.format(temp)
            if command:
                commands.append(command)
            # reset so a skipped key does not re-append the last command
            command = None

    return commands
+
+
def get_pim_interface_defaults():
    """Return the NX-OS per-interface PIM factory defaults.

    Returns a dict with keys dr_prio, border, hello_interval and
    hello_auth_key.  Values mirror the device defaults: DR priority '1',
    border disabled, 30000 ms hello interval, no hello auth key.
    """
    # Fix: dict.iteritems() is Python-2 only; items() behaves the same on
    # both interpreters.  Behavior is unchanged -- every value below is
    # non-None, so all four keys are always returned.
    args = dict(dr_prio='1', border=False,
                hello_interval='30000',
                hello_auth_key=False)

    default = dict((param, value) for (param, value) in args.items()
                   if value is not None)

    return default
+
+
def default_pim_interface_policies(existing, jp_bidir):
    """Build the 'no ...' commands that remove configured PIM policies.

    existing -- dict of the interface's current PIM configuration
    jp_bidir -- True when the jp-policy was configured without an in/out
                direction (applies both ways)

    Returns a list of CLI commands (empty when nothing needs removal).
    """
    commands = []
    # Bug fix: 'command' must be initialized before the 'if command'
    # check below.  Previously, when jp_bidir was set but the existing
    # jp_type_in was not 'prefix', 'command' was never assigned and an
    # UnboundLocalError was raised.
    command = None

    if jp_bidir:
        # Only the prefix-list form is undone here; a route-map based
        # bidirectional policy falls through with no command (as before).
        if existing.get('jp_policy_in') or existing.get('jp_policy_out'):
            if existing.get('jp_type_in') == 'prefix':
                command = 'no ip pim jp-policy prefix-list {0}'.format(
                    existing.get('jp_policy_in')
                )
            if command:
                commands.append(command)
    else:
        # Directional policies are removed one direction at a time.
        # items() replaces the Python-2-only iteritems().
        for k, v in existing.items():
            if k == 'jp_policy_in':
                if existing.get('jp_policy_in'):
                    if existing.get('jp_type_in') == 'prefix':
                        command = 'no ip pim jp-policy prefix-list {0} in'.format(
                            existing.get('jp_policy_in')
                        )
                    else:
                        command = 'no ip pim jp-policy {0} in'.format(
                            existing.get('jp_policy_in')
                        )
            elif k == 'jp_policy_out':
                if existing.get('jp_policy_out'):
                    if existing.get('jp_type_out') == 'prefix':
                        command = 'no ip pim jp-policy prefix-list {0} out'.format(
                            existing.get('jp_policy_out')
                        )
                    else:
                        command = 'no ip pim jp-policy {0} out'.format(
                            existing.get('jp_policy_out')
                        )
            if command:
                commands.append(command)
                command = None

    if existing.get('neighbor_policy'):
        commands.append('no ip pim neighbor-policy')

    return commands
+
+
def config_pim_interface_defaults(existing, jp_bidir, isauth):
    """Return the command list that restores PIM interface defaults."""
    # Diff the factory defaults against the current configuration;
    # only differing values generate commands.
    defaults = get_pim_interface_defaults()
    delta = dict(set(defaults.iteritems()).difference(
        existing.iteritems()))

    commands = []
    if delta:
        # config_pim_interface returns a list of commands.
        commands = config_pim_interface(delta, existing,
                                        jp_bidir, isauth)
    # Policy removals are appended after the default restorations.
    for policy_cmd in default_pim_interface_policies(existing, jp_bidir):
        commands.append(policy_cmd)

    return commands
+
+
def main():
    """Module entry point: diff proposed vs. existing PIM interface
    config and push the resulting commands to the device."""
    argument_spec=dict(
        interface=dict(required=True),
        sparse=dict(type='bool', default=True),
        dr_prio=dict(type='str'),
        hello_auth_key=dict(type='str'),
        hello_interval=dict(type='int'),
        jp_policy_out=dict(type='str'),
        jp_policy_in=dict(type='str'),
        jp_type_out=dict(choices=['prefix', 'routemap']),
        jp_type_in=dict(choices=['prefix', 'routemap']),
        border=dict(type='bool'),
        neighbor_policy=dict(type='str'),
        neighbor_type=dict(choices=['prefix', 'routemap']),
        state=dict(choices=['present', 'absent', 'default'],
                   default='present'),
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    state = module.params['state']

    sparse = module.params['sparse']
    interface = module.params['interface']
    jp_type_in = module.params['jp_type_in']
    jp_type_out = module.params['jp_type_out']
    jp_policy_in = module.params['jp_policy_in']
    jp_policy_out = module.params['jp_policy_out']
    neighbor_policy = module.params['neighbor_policy']
    neighbor_type = module.params['neighbor_type']
    hello_interval = module.params['hello_interval']

    # PIM is an L3 feature; refuse to run against switchports.
    intf_type = get_interface_type(interface)
    if get_interface_mode(interface, intf_type, module) == 'layer2':
        module.fail_json(msg='this module only works on Layer 3 interfaces.')

    # Each policy name must be accompanied by its type.
    if jp_policy_in:
        if not jp_type_in:
            module.fail_json(msg='jp_type_in required when using jp_policy_in.')
    if jp_policy_out:
        if not jp_type_out:
            module.fail_json(msg='jp_type_out required when using '
                                 ' jp_policy_out.')
    if neighbor_policy:
        if not neighbor_type:
            module.fail_json(msg='neighbor_type required when using '
                                 'neighbor_policy.')

    get_existing = get_pim_interface(module, interface)
    existing, jp_bidir, isauth = local_existing(get_existing)
    end_state = existing
    changed = False

    commands = []

    # Parameters considered when computing the config delta.
    args = [
        'interface',
        'sparse',
        'dr_prio',
        'hello_auth_key',
        'hello_interval',
        'jp_policy_out',
        'jp_type_out',
        'jp_type_in',
        'jp_policy_in',
        'border',
        'neighbor_type',
        'neighbor_policy'
    ]
    # NOTE: iteritems() is Python-2 only, consistent with this file.
    proposed = dict((k, v) for k, v in module.params.iteritems()
                    if v is not None and k in args)

    '''
    CANNOT_ABSENT = ['dr_prio', 'hello_interval',
                     'hello_auth_key', 'jp_policy_out', 'jp_policy_in',
                     'jp_type_out', 'jp_type_in', 'border', 'neighbor_type',
                     'neighbor_policy']
    '''

    # Device expresses the hello interval in milliseconds.
    if hello_interval:
        proposed['hello_interval'] = str(proposed['hello_interval'] * 1000)

    # Pairs present in proposed but absent/different in existing.
    delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))

    if state == 'present':
        if delta:
            command = config_pim_interface(delta, existing, jp_bidir, isauth)
            if command:
                commands.append(command)
    elif state == 'default':
        defaults = config_pim_interface_defaults(existing, jp_bidir, isauth)
        if defaults:
            commands.append(defaults)

    elif state == 'absent':
        # NOTE(review): '== True' works but 'is True' would be idiomatic;
        # nothing is removed unless sparse-mode is currently enabled.
        if existing.get('sparse') == True:
            delta['sparse'] = False
            # defaults is a list of commands
            defaults = config_pim_interface_defaults(existing, jp_bidir, isauth)
            if defaults:
                commands.append(defaults)

            command = config_pim_interface(delta, existing, jp_bidir, isauth)
            commands.append(command)

    # All interface commands must be issued under the interface context.
    if commands:
        commands.insert(0, ['interface {0}'.format(interface)])

    cmds = flatten_list(commands)
    results = {}
    if cmds:
        if module.check_mode:
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            execute_config_command(cmds, module)
            # Give the device a moment before re-reading state.
            time.sleep(1)
            get_existing = get_pim_interface(module, interface)
            end_state, jp_bidir, isauth = local_existing(get_existing)
            # Drop the leading 'configure' a fallback path may have added.
            if 'configure' in cmds:
                cmds.pop(0)

    results['proposed'] = proposed
    results['existing'] = existing
    results['updates'] = cmds
    results['changed'] = changed
    results['end_state'] = end_state

    module.exit_json(**results)


if __name__ == '__main__':
    main()
diff --git a/network/nxos/nxos_pim_rp_address.py b/network/nxos/nxos_pim_rp_address.py
new file mode 100644
index 00000000000..c53747e02de
--- /dev/null
+++ b/network/nxos/nxos_pim_rp_address.py
@@ -0,0 +1,416 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_pim_rp_address
+version_added: "2.2"
+short_description: Manages configuration of a PIM static RP address instance.
+description:
+  - Manages configuration of a Protocol Independent Multicast (PIM) static
+ rendezvous point (RP) address instance.
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+  - C(state=absent) removes the whole rp-address configuration, if existing.
+options:
+ rp_address:
+ description:
+ - Configures a Protocol Independent Multicast (PIM) static
+ rendezvous point (RP) address. Valid values are
+ unicast addresses.
+ required: true
+ group_list:
+ description:
+ - Group range for static RP. Valid values are multicast addresses.
+ required: false
+ default: null
+ prefix_list:
+ description:
+ - Prefix list policy for static RP. Valid values are prefix-list
+ policy names.
+ required: false
+ default: null
+ route_map:
+ description:
+ - Route map policy for static RP. Valid values are route-map
+ policy names.
+ required: false
+ default: null
+ bidir:
+ description:
+ - Group range is treated in PIM bidirectional mode.
+ required: false
+ choices: ['true','false']
+ default: null
+'''
+EXAMPLES = '''
+- nxos_pim_rp_address:
+ rp_address: "10.1.1.20"
+ state: present
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"rp_address": "10.1.1.21"}
+existing:
+ description: list of existing pim rp-address configuration entries
+ returned: verbose mode
+ type: list
+ sample: []
+end_state:
+ description: pim rp-address configuration entries after module execution
+ returned: verbose mode
+ type: list
+ sample: [{"bidir": false, "group_list": "224.0.0.0/4",
+ "rp_address": "10.1.1.21"}]
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+    sample: ["ip pim rp-address 10.1.1.21 group-list 224.0.0.0/4"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Normalize *val* into a list.

    Lists/tuples become plain lists, None becomes an empty list and any
    other value is wrapped in a single-element list.
    """
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass shared by the migrated nxos modules.

    Adds section expansion/lookup helpers and an add() that creates any
    missing parent sections when inserting child configuration lines.
    """

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all of its children.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Return the ConfigLine whose text and full ancestry match *path*
        # (a list of section texts); implicitly returns None when absent.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of ConfigLine objects back into raw config text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # NOTE(review): returns a str on success but an empty *list* when
        # the path is missing -- callers must handle both shapes.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Accept a bare string as a single-element path.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more configuration lines, creating missing parent
        sections along the way.
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Instantiate a module object, preferring the legacy get_module()
    factory and falling back to NetworkModule when it is unavailable."""
    try:
        module = get_module(**kwargs)
    except NameError:
        # get_module was not importable on this Ansible version.
        module = NetworkModule(**kwargs)
    return module
+
def get_config(module, include_defaults=False):
    """Return the device running-config as a CustomNetworkConfig.

    A user-supplied 'config' parameter wins; otherwise the config is
    fetched via whichever connection API this Ansible version exposes.
    """
    contents = module.params['config']
    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            contents = module.config.get_config(
                include_defaults=module.params['include_defaults'])
    return CustomNetworkConfig(indent=2, contents=contents)
+
def load_config(module, candidate):
    """Diff *candidate* against the device config and push the delta.

    Returns a dict with 'changed' and, when commands were generated,
    'updates' (the command list).  Honors check mode and the 'save'
    module parameter.
    """
    running = get_config(module)
    updates = [str(entry).strip() for entry in candidate.difference(running)]

    save_config = module.params['save']
    result = dict(changed=False)

    if updates:
        if not module.check_mode:
            # Older connections expose configure(); newer ones config().
            try:
                module.configure(updates)
            except AttributeError:
                module.config(updates)

            if save_config:
                # Persist to startup-config via whichever API exists.
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = updates

    return result
+# END OF COMMON CODE
+
# Module parameters that are booleans in the argument spec.
BOOL_PARAMS = ['bidir']
# Maps module parameter names to their NX-OS CLI command prefixes.
PARAM_TO_COMMAND_KEYMAP = {
    'rp_address': 'ip pim rp-address'
}
# No parameters carry device-side defaults for this module.
PARAM_TO_DEFAULT_KEYMAP = {}
# Non-fatal warnings surfaced in the module result.
WARNINGS = []
+
def invoke(name, *args, **kwargs):
    """Call the module-level callable *name* with the given arguments.

    Returns the callee's result, or None when no such (truthy) global
    exists.
    """
    target = globals().get(name)
    return target(*args, **kwargs) if target else None
+
+
def get_value(config, module):
    """Parse every 'ip pim rp-address' entry out of *config*.

    Returns a list of dicts, one per entry, each carrying 'rp_address',
    'bidir' and at most one of 'route_map' / 'prefix_list' /
    'group_list'.
    """
    entries = []
    for line in config.splitlines():
        if 'ip pim rp-address' not in line:
            continue
        tokens = line.split()
        # Token layout: ip pim rp-address <addr> [<qualifier> <value>] [bidir]
        entry = {'rp_address': tokens[3]}
        if len(tokens) > 5:
            qualifier = tokens[4]
            qual_value = tokens[5]
            if qualifier == 'route-map':
                entry['route_map'] = qual_value
            elif qualifier == 'prefix-list':
                entry['prefix_list'] = qual_value
            elif qualifier == 'group-list':
                entry['group_list'] = qual_value
        entry['bidir'] = 'bidir' in line
        entries.append(entry)
    return entries
+
+
def get_existing(module, args):
    """Return the list of rp-address entries parsed from the device's
    running configuration.

    *args* is accepted for signature symmetry with sibling modules but is
    not used here.  (The previous 'existing = {}' initializer was a dead
    store, immediately overwritten -- removed.)
    """
    config = str(get_config(module))
    return get_value(config, module)
+
+
def apply_key_map(key_map, table):
    """Rename the keys of *table* according to *key_map*.

    Keys absent from *key_map* (or mapped to a falsy name) are dropped;
    values are carried over unchanged.

    The original if/else performed the identical assignment in both
    branches and re-fetched the value it already held -- collapsed.
    """
    new_dict = {}
    for key, value in table.items():
        new_key = key_map.get(key)
        if new_key:
            new_dict[new_key] = value
    return new_dict
+
+
def state_present(module, existing, proposed, candidate):
    """Stage the command that creates or updates the static RP entry."""
    base_command = 'ip pim rp-address {0}'.format(module.params['rp_address'])
    staged = build_command(proposed, base_command)
    if staged:
        candidate.add(staged, parents=[])
+
+
def build_command(param_dict, command):
    """Append any configured qualifiers to *command*.

    Qualifiers (group-list / prefix-list / route-map and the bidir flag)
    are taken from *param_dict*; returns the finished command wrapped in
    a single-element list.
    """
    for option in ('group_list', 'prefix_list', 'route_map'):
        option_value = param_dict.get(option)
        if option_value:
            command += ' {0} {1}'.format(
                option.replace('_', '-'), option_value)
    if param_dict.get('bidir'):
        command += ' bidir'
    return [command]
+
+
def state_absent(module, existing, proposed, candidate):
    """Stage removal of the rp-address entry matching the proposal."""
    commands = list()
    target = proposed['rp_address']
    for entry in existing:
        if entry.get('rp_address') != target:
            continue
        removal = 'no ip pim rp-address {0}'.format(target)
        if entry.get('group_list'):
            # The group-list qualifier must be repeated on removal.
            commands = build_command(entry, removal)
        else:
            commands = [removal]
    if commands:
        candidate.add(commands, parents=[])
+
+
def main():
    """Module entry point: manage a PIM static rp-address entry."""
    argument_spec = dict(
        rp_address=dict(required=True, type='str'),
        group_list=dict(required=False, type='str'),
        prefix_list=dict(required=False, type='str'),
        route_map=dict(required=False, type='str'),
        bidir=dict(required=False, type='bool'),
        state=dict(choices=['present', 'absent'], default='present',
                   required=False),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    # Only one of group-list / prefix-list / route-map may qualify an RP.
    module = get_network_module(argument_spec=argument_spec,
                                mutually_exclusive=[['group_list', 'route_map'],
                                                    ['group_list', 'prefix_list'],
                                                    ['route_map', 'prefix_list']],
                                supports_check_mode=True)

    state = module.params['state']

    # Parameters considered when building the proposed configuration.
    args = [
        'rp_address',
        'group_list',
        'prefix_list',
        'route_map',
        'bidir'
    ]

    existing = invoke('get_existing', module, args)
    end_state = existing
    # NOTE: iteritems() is Python-2 only, consistent with this file.
    proposed_args = dict((k, v) for k, v in module.params.iteritems()
                         if v is not None and k in args)

    # Normalize stringified booleans and keep only keys that are either
    # already present on an existing entry or carry a truthy value.
    proposed = {}
    for key, value in proposed_args.iteritems():
        if str(value).lower() == 'true':
            value = True
        elif str(value).lower() == 'false':
            value = False
        for each in existing:
            if each.get(key) or (not each.get(key) and value):
                proposed[key] = value

    result = {}
    candidate = CustomNetworkConfig(indent=3)
    # Dispatches to state_present / state_absent.
    invoke('state_%s' % state, module, existing, proposed, candidate)

    try:
        response = load_config(module, candidate)
        result.update(response)
    except ShellError:
        exc = get_exception()
        module.fail_json(msg=str(exc))

    result['connected'] = module.connected
    # Verbose runs re-read the device so end_state reflects the change.
    if module._verbosity > 0:
        end_state = invoke('get_existing', module, args)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed_args

    if WARNINGS:
        result['warnings'] = WARNINGS

    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/network/nxos/nxos_ping.py b/network/nxos/nxos_ping.py
index 1e29aebd5bd..a698b98ba8d 100644
--- a/network/nxos/nxos_ping.py
+++ b/network/nxos/nxos_ping.py
@@ -16,11 +16,15 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: nxos_ping
version_added: "2.1"
-short_description: Tests reachability using ping from Nexus switch
+short_description: Tests reachability using ping from Nexus switch.
description:
- Tests reachability using ping from switch to a remote destination.
extends_documentation_fragment: nxos
@@ -50,10 +54,17 @@
'''
EXAMPLES = '''
-# test reachability to 8.8.8.8 using mgmt vrf
-- nxos_ping: dest=8.8.8.8 vrf=management host={{ inventory_hostname }}
-# Test reachability to a few different public IPs using mgmt vrf
-- nxos_ping: dest={{ item }} vrf=management host={{ inventory_hostname }}
+- name: Test reachability to 8.8.8.8 using mgmt vrf
+ nxos_ping:
+ dest: 8.8.8.8
+ vrf: management
+ host: 68.170.147.165
+
+- name: Test reachability to a few different public IPs using mgmt vrf
+ nxos_ping:
+ dest: nxos_ping
+ vrf: management
+ host: 68.170.147.165
with_items:
- 8.8.8.8
- 4.4.4.4
@@ -105,6 +116,162 @@
sample: "0.00%"
'''
+import json
+import collections
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Normalize *val* into a list.

    Lists/tuples become plain lists, None becomes an empty list and any
    other value is wrapped in a single-element list.
    """
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass shared by the migrated nxos modules.

    Adds section expansion/lookup helpers and an add() that creates any
    missing parent sections when inserting child configuration lines.
    """

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all of its children.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Return the ConfigLine whose text and full ancestry match *path*
        # (a list of section texts); implicitly returns None when absent.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of ConfigLine objects back into raw config text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # NOTE(review): returns a str on success but an empty *list* when
        # the path is missing -- callers must handle both shapes.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Accept a bare string as a single-element path.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more configuration lines, creating missing parent
        sections along the way.
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Instantiate a module object, preferring the legacy get_module()
    factory and falling back to NetworkModule when it is unavailable."""
    try:
        module = get_module(**kwargs)
    except NameError:
        # get_module was not importable on this Ansible version.
        module = NetworkModule(**kwargs)
    return module
+
def get_config(module, include_defaults=False):
    """Return the device running-config as a CustomNetworkConfig.

    A user-supplied 'config' parameter wins; otherwise the config is
    fetched via whichever connection API this Ansible version exposes.
    """
    contents = module.params['config']
    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            contents = module.config.get_config(
                include_defaults=module.params['include_defaults'])
    return CustomNetworkConfig(indent=2, contents=contents)
+
def load_config(module, candidate):
    """Diff *candidate* against the device config and push the delta.

    Returns a dict with 'changed' and, when commands were generated,
    'updates' (the command list).  Honors check mode and the 'save'
    module parameter.
    """
    running = get_config(module)
    updates = [str(entry).strip() for entry in candidate.difference(running)]

    save_config = module.params['save']
    result = dict(changed=False)

    if updates:
        if not module.check_mode:
            # Older connections expose configure(); newer ones config().
            try:
                module.configure(updates)
            except AttributeError:
                module.config(updates)

            if save_config:
                # Persist to startup-config via whichever API exists.
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = updates

    return result
+# END OF COMMON CODE
def get_summary(results_list, reference_point):
summary_string = results_list[reference_point+1]
@@ -147,6 +314,11 @@ def get_statistics_summary_line(response_as_list):
def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
@@ -156,6 +328,19 @@ def execute_show(cmds, module, command_type=None):
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
return response
@@ -199,9 +384,12 @@ def main():
source=dict(required=False),
state=dict(required=False, choices=['present', 'absent'],
default='present'),
+ include_defaults=dict(default=False),
+ config=dict(),
+ save=dict(type='bool', default=False)
)
- module = get_module(argument_spec=argument_spec,
- supports_check_mode=True)
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
destination = module.params['dest']
count = module.params['count']
@@ -254,10 +442,5 @@ def main():
module.exit_json(**results)
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.netcfg import *
-from ansible.module_utils.nxos import *
if __name__ == '__main__':
main()
diff --git a/network/nxos/nxos_portchannel.py b/network/nxos/nxos_portchannel.py
new file mode 100644
index 00000000000..acb0a61b4c4
--- /dev/null
+++ b/network/nxos/nxos_portchannel.py
@@ -0,0 +1,746 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_portchannel
+version_added: "2.2"
+short_description: Manages port-channel interfaces.
+description:
+ - Manages port-channel specific configuration parameters.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - C(state=absent) removes the portchannel config and interface if it
+ already exists. If members to be removed are not explicitly
+ passed, all existing members (if any), are removed.
+ - Members must be a list.
+ - LACP needs to be enabled first if active/passive modes are used.
+options:
+ group:
+ description:
+ - Channel-group number for the port-channel.
+ required: true
+ mode:
+ description:
+ - Mode for the port-channel, i.e. on, active, passive.
+ required: false
+ default: on
+ choices: ['active','passive','on']
+ min_links:
+ description:
+ - Min links required to keep portchannel up.
+ required: false
+ default: null
+ members:
+ description:
+ - List of interfaces that will be managed in a given portchannel.
+ required: false
+ default: null
+ force:
+ description:
+ - When true it forces port-channel members to match what is
+ declared in the members param. This can be used to remove
+ members.
+ required: false
+ choices: ['true', 'false']
+ default: false
+ state:
+ description:
+ - Manage the state of the resource.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+EXAMPLES = '''
+# Ensure port-channel99 is created, add two members, and set to mode on
+- nxos_portchannel:
+ group: 99
+ members: ['Ethernet1/1','Ethernet1/2']
+ mode: 'active'
+ state: present
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"group": "12", "members": ["Ethernet2/5",
+ "Ethernet2/6"], "mode": "on"}
+existing:
+ description:
+ - k/v pairs of existing portchannel
+ type: dict
+ sample: {"group": "12", "members": ["Ethernet2/5",
+ "Ethernet2/6"], "members_detail": {
+ "Ethernet2/5": {"mode": "active", "status": "D"},
+ "Ethernet2/6": {"mode": "active", "status": "D"}},
+ "min_links": null, "mode": "active"}
+end_state:
+ description: k/v pairs of portchannel info after module execution
+ returned: always
+ type: dict
+ sample: {"group": "12", "members": ["Ethernet2/5",
+ "Ethernet2/6"], "members_detail": {
+ "Ethernet2/5": {"mode": "on", "status": "D"},
+ "Ethernet2/6": {"mode": "on", "status": "D"}},
+ "min_links": null, "mode": "on"}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["interface Ethernet2/6", "no channel-group 12",
+ "interface Ethernet2/5", "no channel-group 12",
+ "interface Ethernet2/6", "channel-group 12 mode on",
+ "interface Ethernet2/5", "channel-group 12 mode on"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import collections
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Normalize *val* into a list.

    Lists/tuples become plain lists, None becomes an empty list and any
    other value is wrapped in a single-element list.
    """
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass shared by the migrated nxos modules.

    Adds section expansion/lookup helpers and an add() that creates any
    missing parent sections when inserting child configuration lines.
    """

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all of its children.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Return the ConfigLine whose text and full ancestry match *path*
        # (a list of section texts); implicitly returns None when absent.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of ConfigLine objects back into raw config text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # NOTE(review): returns a str on success but an empty *list* when
        # the path is missing -- callers must handle both shapes.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Accept a bare string as a single-element path.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more configuration lines, creating missing parent
        sections along the way.
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Instantiate a module object, preferring the legacy get_module()
    factory and falling back to NetworkModule when it is unavailable."""
    try:
        module = get_module(**kwargs)
    except NameError:
        # get_module was not importable on this Ansible version.
        module = NetworkModule(**kwargs)
    return module
+
def get_config(module, include_defaults=False):
    """Return the device running-config as a CustomNetworkConfig.

    A user-supplied 'config' parameter wins; otherwise the config is
    fetched via whichever connection API this Ansible version exposes.
    """
    contents = module.params['config']
    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            contents = module.config.get_config(
                include_defaults=module.params['include_defaults'])
    return CustomNetworkConfig(indent=2, contents=contents)
+
def load_config(module, candidate):
    """Diff *candidate* against the device config and push the delta.

    Returns a dict with 'changed' and, when commands were generated,
    'updates' (the command list).  Honors check mode and the 'save'
    module parameter.
    """
    running = get_config(module)
    updates = [str(entry).strip() for entry in candidate.difference(running)]

    save_config = module.params['save']
    result = dict(changed=False)

    if updates:
        if not module.check_mode:
            # Older connections expose configure(); newer ones config().
            try:
                module.configure(updates)
            except AttributeError:
                module.config(updates)

            if save_config:
                # Persist to startup-config via whichever API exists.
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = updates

    return result
+# END OF COMMON CODE
# Non-fatal warnings surfaced in the module result.
WARNINGS = []
# Maps module parameter names to their NX-OS CLI command prefixes.
PARAM_TO_COMMAND_KEYMAP = {
    'min_links': 'lacp min-links'
}
+
+
def invoke(name, *args, **kwargs):
    """Call the module-level callable *name* with the given arguments.

    Returns the callee's result, or None when no such (truthy) global
    exists.
    """
    target = globals().get(name)
    return target(*args, **kwargs) if target else None
+
+
def get_value(arg, config, module):
    """Extract the configured value for *arg* from *config*.

    Looks up the CLI prefix for *arg* in PARAM_TO_COMMAND_KEYMAP and
    returns the remainder of the matching config line, or '' when the
    command is absent.
    """
    # Fix: the named group must be spelled (?P<value>...); the group name
    # had been dropped ('(?P.*)'), which makes re.compile() raise
    # re.error before .group('value') could ever work.
    REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
    value = ''
    if PARAM_TO_COMMAND_KEYMAP[arg] in config:
        value = REGEX.search(config).group('value')
    return value
+
+
def check_interface(module, netcfg):
    """Return True when 'interface port-channel<group>' exists in *netcfg*.

    Fix: the original pattern ended with ``port-channel{0}*$`` which
    applies ``*`` to the *last digit* of the group — e.g. group '10'
    also matched 'port-channel1' and 'port-channel100'. Anchor the full
    group number followed by optional trailing whitespace instead.
    """
    config = str(netcfg)
    REGEX = re.compile(r'\s+interface port-channel{0}\s*$'.format(module.params['group']), re.M)
    value = False
    try:
        if REGEX.search(config):
            value = True
    except TypeError:
        # non-string config (defensive; str() above normally prevents this)
        value = False

    return value
+
+
def get_custom_value(arg, config, module):
    """Return True when *config* contains 'member vni <vni> associate-vrf'.

    NOTE(review): reads module.params['vni'], which is not declared in
    this module's argument_spec — looks carried over from the VXLAN
    modules; confirm before relying on it.
    """
    pattern = re.compile(
        r'\s+member vni {0} associate-vrf\s*$'.format(module.params['vni']),
        re.M)
    try:
        return bool(pattern.search(config))
    except TypeError:
        # config was not a string (e.g. None)
        return False
+
+
def execute_config_command(commands, module):
    """Push configuration commands, supporting both connection layers.

    Legacy modules expose module.configure(); when that attribute is
    missing, fall back to module.cli with an explicit 'configure' prefix.
    Fails the module on any shell error.
    """
    try:
        output = module.configure(commands)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(clie), commands=commands)
    except AttributeError:
        try:
            # NOTE: mutates the caller's list — 'configure' is prepended
            # and callers strip it back off after execution.
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            output = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(clie), commands=commands)
    return output
+
+
def get_cli_body_ssh(command, response, module):
    """Decode the first element of *response* as JSON, wrapped in a list.

    Fails the module when the device returned non-JSON output (the
    command does not support '| json').
    """
    try:
        return [json.loads(response[0])]
    except ValueError:
        module.fail_json(msg='Command does not support JSON output',
                         command=command)
+
+
def execute_show(cmds, module, command_type=None):
    """Run show commands via module.execute, falling back to module.cli.

    command_type is translated for the cli layer ('cli_show' -> 'json',
    'cli_show_ascii' -> 'text'); with no command_type the cli fallback
    sends the commands raw. Fails the module on shell errors.
    """
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        # module.execute is absent: use the newer cli connection layer
        try:
            if command_type:
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                module.cli.add_commands(cmds, raw=True)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
+
+
def execute_show_command(command, module, command_type='cli_show'):
    """Run a show command and return its parsed body.

    Over cli transport, '| json' is appended to 'show port-channel
    summary' so the text response can be JSON-decoded.
    NOTE(review): 'body' stays unbound for any transport other than
    'cli'/'nxapi' — an unexpected transport would raise here.
    """
    if module.params['transport'] == 'cli':
        if 'show port-channel summary' in command:
            command += ' | json'
        cmds = [command]
        response = execute_show(cmds, module)
        body = get_cli_body_ssh(command, response, module)
    elif module.params['transport'] == 'nxapi':
        cmds = [command]
        body = execute_show(cmds, module, command_type=command_type)

    return body
+
+
def get_portchannel_members(pchannel):
    """Return the member rows of a port-channel table entry, or [] when
    the entry has no TABLE_member/ROW_member structure."""
    try:
        return pchannel['TABLE_member']['ROW_member']
    except KeyError:
        return []
+
+
def get_portchannel_mode(interface, protocol, module, netcfg):
    """Return the channel-group mode of *interface*: 'on' for non-LACP,
    otherwise 'active'/'passive' parsed from the interface section.

    Fixes: 'mode' was only assigned inside conditional branches, so an
    LACP member without a 'channel-group ... mode ...' line raised
    UnboundLocalError (and 'find' could raise NameError because its
    check sat outside the startswith branch); default mode to None and
    nest the check under the matching line.

    NOTE(review): the netcfg argument is ignored and re-fetched here.
    """
    mode = None
    if protocol != 'LACP':
        mode = 'on'
    else:
        netcfg = get_config(module)
        parents = ['interface {0}'.format(interface.capitalize())]
        body = netcfg.get_section(parents)

        mode_list = body.split('\n')

        for line in mode_list:
            this_line = line.strip()
            if this_line.startswith('channel-group'):
                find = this_line
                if 'mode' in find:
                    if 'passive' in find:
                        mode = 'passive'
                    elif 'active' in find:
                        mode = 'active'

    return mode
+
+
def get_portchannel(module, netcfg=None):
    """Describe the configured port-channel for module.params['group'].

    Returns a dict with group, members, per-member details and the
    aggregate channel-group mode ('unknown' when members disagree), or
    {} when the group is absent from 'show port-channel summary'.
    """
    command = 'show port-channel summary'
    portchannel = {}
    portchannel_table = {}
    members = []

    body = execute_show_command(command, module)

    try:
        pc_table = body[0]['TABLE_channel']['ROW_channel']

        # the device returns a dict for one entry, a list for several
        if isinstance(pc_table, dict):
            pc_table = [pc_table]

        for pc in pc_table:
            if pc['group'] == module.params['group']:
                portchannel_table = pc
    except (KeyError, AttributeError, TypeError, IndexError):
        return {}

    if portchannel_table:
        portchannel['group'] = portchannel_table['group']
        protocol = portchannel_table['prtcl']
        members_list = get_portchannel_members(portchannel_table)

        if isinstance(members_list, dict):
            members_list = [members_list]

        member_dictionary = {}
        for each_member in members_list:
            interface = each_member['port']
            members.append(interface)

            pc_member = {}
            pc_member['status'] = str(each_member['port-status'])
            pc_member['mode'] = get_portchannel_mode(interface,
                                                     protocol, module, netcfg)

            member_dictionary[interface] = pc_member
            portchannel['members'] = members
            portchannel['members_detail'] = member_dictionary

        # Ensure each member have the same mode.
        # NOTE(review): iteritems() is Python 2 only, consistent with the
        # rest of this file; 'value' is reused after the loop — safe only
        # because len(modes) == 1 implies the loop ran at least once.
        modes = set()
        for each, value in member_dictionary.iteritems():
            modes.update([value['mode']])
        if len(modes) == 1:
            portchannel['mode'] = value['mode']
        else:
            portchannel['mode'] = 'unknown'
    return portchannel
+
+
def get_existing(module, args):
    """Gather current device state for the requested port-channel group.

    Returns (existing, interface_exist): existing holds min_links plus
    the port-channel summary details when the interface is present.
    NOTE(review): *args* is unused here — kept for the invoke() calling
    convention used in main().
    """
    existing = {}
    netcfg = get_config(module)

    interface_exist = check_interface(module, netcfg)
    if interface_exist:
        parents = ['interface port-channel{0}'.format(module.params['group'])]
        config = netcfg.get_section(parents)

        if config:
            existing['min_links'] = get_value('min_links', config, module)
            existing.update(get_portchannel(module, netcfg=netcfg))

    return existing, interface_exist
+
+
def apply_key_map(key_map, table):
    """Return *table* re-keyed through *key_map*.

    Keys that do not appear in the map (or map to a falsy name) are
    dropped; values are carried over unchanged, including falsy ones.
    """
    return dict((key_map[key], value)
                for key, value in table.items()
                if key_map.get(key))
+
+
def config_portchannel(proposed, mode, group):
    """Build initial config for a new port-channel: per-member
    channel-group commands plus an optional lacp min-links setting."""
    commands = []

    for member in proposed.get('members', []):
        commands.append('interface {0}'.format(member))
        commands.append(
            'channel-group {group} mode {mode}'.format(group=group, mode=mode))

    min_links = proposed.get('min_links')
    if min_links:
        commands.append('interface port-channel {0}'.format(group))
        commands.append(
            'lacp min-links {min_links}'.format(min_links=min_links))

    return commands
+
+
def get_commands_to_add_members(proposed, existing, module):
    """Commands to channel-group any proposed members missing on the device."""
    proposed_members = proposed.get('members', [])
    existing_members = existing.get('members', [])

    commands = []
    for member in set(proposed_members).difference(existing_members):
        commands.append('interface {0}'.format(member))
        commands.append('channel-group {0} mode {1}'.format(
            existing['group'], proposed['mode']))
    return commands
+
+
def get_commands_to_remove_members(proposed, existing, module):
    """Commands to strip channel-group from members no longer proposed."""
    proposed_members = proposed.get('members', [])
    existing_members = existing.get('members', [])

    commands = []
    for member in set(existing_members).difference(proposed_members):
        commands.append('interface {0}'.format(member))
        commands.append('no channel-group {0}'.format(existing['group']))
    return commands
+
+
def get_commands_if_mode_change(proposed, existing, group, mode, module):
    """Commands to re-apply channel-group on members whose mode differs.

    Affected members are first removed from the group and then re-added
    with the requested mode.
    """
    proposed_members = proposed.get('members', [])
    existing_members = existing.get('members', [])
    members_dict = existing.get('members_detail', {})

    members_to_remove = set(existing_members).difference(proposed_members)
    changing = []
    if members_dict:
        # iteritems(): Python 2 only, consistent with the rest of the file
        for interface, detail in members_dict.iteritems():
            still_wanted = (interface in proposed_members and
                            interface not in members_to_remove)
            if still_wanted and detail['mode'] != mode:
                changing.append(interface)

    commands = []
    if changing:
        for member in changing:
            commands.append('interface {0}'.format(member))
            commands.append('no channel-group {0}'.format(group))

        for member in changing:
            commands.append('interface {0}'.format(member))
            commands.append('channel-group {0} mode {1}'.format(group, mode))

    return commands
+
+
def get_commands_min_links(existing, proposed, group, min_links, module):
    """Build commands to set lacp min-links when absent or different.

    Fix: the emitted command was 'lacp min-link' (singular), which is
    inconsistent with the NX-OS keyword used everywhere else in this
    module ('lacp min-links' in PARAM_TO_COMMAND_KEYMAP and
    config_portchannel). The duplicated try/except branches are also
    collapsed into a single flag.
    """
    try:
        needs_update = (existing['min_links'] is None or
                        existing['min_links'] != proposed['min_links'])
    except KeyError:
        # either side has no min_links recorded: (re)apply it
        needs_update = True

    commands = []
    if needs_update:
        commands.append('interface port-channel{0}'.format(group))
        commands.append('lacp min-links {0}'.format(min_links))
    return commands
+
+
def flatten_list(command_lists):
    """Flatten one level: list entries are spliced in, scalars appended."""
    flat = []
    for entry in command_lists:
        flat.extend(entry if isinstance(entry, list) else [entry])
    return flat
+
+
def main():
    """Entry point: reconcile desired port-channel state with the device
    and report proposed/existing/end_state plus the commands sent."""
    argument_spec = dict(
        group=dict(required=True, type='str'),
        mode=dict(required=False, choices=['on', 'active', 'passive'],
                  default='on', type='str'),
        min_links=dict(required=False, default=None, type='str'),
        members=dict(required=False, default=None, type='list'),
        force=dict(required=False, default='false', type='str',
                   choices=['true', 'false']),
        state=dict(required=False, choices=['absent', 'present'],
                   default='present'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    group = str(module.params['group'])
    mode = module.params['mode']
    min_links = module.params['min_links']
    members = module.params['members']
    state = module.params['state']

    # NOTE(review): if 'force' arrives as anything other than the exact
    # strings 'true'/'false' (e.g. a YAML boolean coerced to 'False'),
    # neither branch fires and 'force' stays unbound until used below —
    # confirm argument coercion upstream.
    if str(module.params['force']).lower() == 'true':
        force = True
    elif module.params['force'] == 'false':
        force = False

    # NOTE(review): 'mode' always has a default, so this effectively
    # requires 'members' whenever state=present.
    if ((min_links or mode) and
        (not members and state == 'present')):
        module.fail_json(msg='"members" is required when state=present and '
                             '"min_links" or "mode" are provided')

    changed = False
    args = [
        'group',
        'members',
        'min_links',
        'mode'
    ]

    existing, interface_exist = invoke('get_existing', module, args)
    end_state = existing
    # iteritems(): Python 2 only, consistent with the rest of this module
    proposed = dict((k, v) for k, v in module.params.iteritems()
                    if v is not None and k in args)

    result = {}
    commands = []
    if state == 'absent':
        if existing:
            commands.append(['no interface port-channel{0}'.format(group)])
    elif state == 'present':
        if not interface_exist:
            command = config_portchannel(proposed, mode, group)
            commands.append(command)
            commands.insert(0, 'interface port-channel{0}'.format(group))
            WARNINGS.append("The proposed port-channel interface did not "
                            "exist. It's recommended to use nxos_interface to "
                            "create all logical interfaces.")

        elif existing and interface_exist:
            if force:
                command = get_commands_to_remove_members(proposed, existing, module)
                commands.append(command)

            command = get_commands_to_add_members(proposed, existing, module)
            commands.append(command)

            mode_command = get_commands_if_mode_change(proposed, existing,
                                                       group, mode, module)

            commands.insert(0, mode_command)

            if min_links:
                command = get_commands_min_links(existing, proposed,
                                                 group, min_links, module)
                commands.append(command)

    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            module.exit_json(changed=True, commands=cmds)
        else:
            output = execute_config_command(cmds, module)
            changed = True
            end_state, interface_exist = get_existing(module, args)
            # execute_config_command may have prepended 'configure';
            # strip it from the reported updates
            if 'configure' in cmds:
                cmds.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed

    if WARNINGS:
        results['warnings'] = WARNINGS

    module.exit_json(**results)


if __name__ == '__main__':
    main()
\ No newline at end of file
diff --git a/network/nxos/nxos_reboot.py b/network/nxos/nxos_reboot.py
new file mode 100644
index 00000000000..c479d10a806
--- /dev/null
+++ b/network/nxos/nxos_reboot.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_reboot
+version_added: 2.2
+short_description: Reboot a network device.
+description:
+ - Reboot a network device.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - The module will fail due to timeout issues, but the reboot will be
+ performed anyway.
+options:
+ confirm:
+ description:
+ - Safeguard boolean. Set to true if you're sure you want to reboot.
+ required: false
+ default: false
+'''
+
+EXAMPLES = '''
+- nxos_reboot:
+ confirm: true
+ host: "{{ inventory_hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+rebooted:
+ description: Whether the device was instructed to reboot.
+ returned: success
+ type: boolean
+ sample: true
+'''
+
+import json
+import collections
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Normalize *val* to a list: sequences are copied, None becomes an
    empty list, any other scalar is wrapped in a one-element list."""
    if val is None:
        return list()
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig extended with section lookup/expansion helpers used
    by the migrated nxos modules."""

    def expand_section(self, configobj, S=None):
        """Return *configobj* plus all of its descendants, depth-first."""
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        """Return the ConfigLine whose text and full ancestry match *path*."""
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        """Join the raw config lines of *section* into one text block."""
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        """Return the config section at *path* as text.

        NOTE(review): returns an empty *list* (not '') when the path does
        not exist — callers must cope with both return types.
        """
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        """Return the ConfigLine objects under *path*.

        Raises ValueError when the path is absent from the config.
        """
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more lines of configuration, creating any missing
        *parents* along the way; duplicates are skipped."""

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Return the module object via whichever factory this Ansible
    version provides: legacy get_module(), else NetworkModule."""
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the running-config wrapped in CustomNetworkConfig.

    Prefers the explicit 'config' module parameter; otherwise fetches
    from the device through either connection-object layout.
    NOTE(review): the include_defaults argument is ignored — the
    'include_defaults' module parameter is used instead.
    """
    config = module.params['config']
    if not config:
        try:
            # legacy transport objects expose get_config() directly
            config = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
+
def load_config(module, candidate):
    """Diff *candidate* against the running config and push the delta.

    Honors check mode and the 'save' parameter (copy run to start).
    Returns a dict with 'changed' and, when commands were sent, 'updates'.
    """
    config = get_config(module)

    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            # support both connection layers (legacy .configure vs .config)
            try:
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
+
+
def reboot(module):
    """Disable confirmation prompts, then issue 'reload'.

    The device drops the session immediately, so callers should expect
    a timeout rather than command output.
    """
    disable_confirmation(module)
    execute_show_command(['reload'], module, command_type='cli_show_ascii')
+
+
def execute_show(cmds, module, command_type=None):
    """Run show-type commands via module.execute, falling back to module.cli.

    command_type is translated for the cli layer ('cli_show' -> 'json',
    'cli_show_ascii' -> 'text'). Fails the module on shell errors.

    Fix: in the module.cli fallback the no-command_type branch was a
    copy of the typed branch and passed output=None; the sibling
    implementation in the other migrated nxos modules sends the
    commands raw instead (raw=True), which is what an untyped command
    requires.
    """
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        # module.execute is absent: use the newer cli connection layer
        try:
            if command_type:
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                module.cli.add_commands(cmds, raw=True)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
+
+
def execute_show_command(command, module, command_type='cli_show'):
    """Dispatch *command* based on the configured transport.

    NOTE(review): 'body' is only assigned for transports 'cli' and
    'nxapi'; any other transport value would raise UnboundLocalError.
    """
    if module.params['transport'] == 'cli':
        body = execute_show(command, module)
    elif module.params['transport'] == 'nxapi':
        body = execute_show(command, module, command_type=command_type)

    return body
+
+
def disable_confirmation(module):
    """Disable the 'are you sure?' prompt so 'reload' runs unattended.

    Fix: the response was bound to an unused local ('body'); drop the
    dead assignment.
    """
    command = ['terminal dont-ask']
    execute_show_command(command, module, command_type='cli_show_ascii')
+
+
def main():
    """Entry point: enforce the 'confirm' safeguard, then reboot.

    NOTE(review): 'confirm' is declared required=True here although the
    DOCUMENTATION block advertises it as optional with default false —
    confirm which is intended.
    """
    argument_spec = dict(
        confirm=dict(required=True, type='bool'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    confirm = module.params['confirm']
    if not confirm:
        module.fail_json(msg='confirm must be set to true for this '
                             'module to work.')

    changed = False
    rebooted = False

    reboot(module)

    # the reload usually severs the session; if we get here, report success
    changed = True
    rebooted = True

    results = {}
    results['changed'] = changed
    results['rebooted'] = rebooted

    module.exit_json(**results)


if __name__ == '__main__':
    main()
diff --git a/network/nxos/nxos_rollback.py b/network/nxos/nxos_rollback.py
new file mode 100644
index 00000000000..736fa25ccaa
--- /dev/null
+++ b/network/nxos/nxos_rollback.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_rollback
+version_added: "2.2"
+short_description: Set a checkpoint or rollback to a checkpoint.
+description:
+ - This module offers the ability to set a configuration checkpoint
+ file or rollback to a configuration checkpoint file on Cisco NXOS
+ switches.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - Sometimes C(transport=nxapi) may cause a timeout error.
+options:
+ checkpoint_file:
+ description:
+ - Name of checkpoint file to create. Mutually exclusive
+ with rollback_to.
+ required: false
+ default: null
+ rollback_to:
+ description:
+ - Name of checkpoint file to rollback to. Mutually exclusive
+ with checkpoint_file.
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+- nxos_rollback:
+ checkpoint_file: backup.cfg
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+- nxos_rollback:
+ rollback_to: backup.cfg
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+filename:
+ description: The filename of the checkpoint/rollback file.
+ returned: success
+ type: string
+ sample: 'backup.cfg'
+status:
+ description: Which operation took place and whether it was successful.
+ returned: success
+ type: string
+ sample: 'rollback executed'
+'''
+
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Normalize *val* to a list: sequences are copied, None becomes an
    empty list, any other scalar is wrapped in a one-element list."""
    if isinstance(val, (list, tuple)):
        return list(val)
    elif val is not None:
        return [val]
    else:
        return list()
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig extended with section lookup/expansion helpers used
    by the migrated nxos modules."""

    def expand_section(self, configobj, S=None):
        """Return *configobj* plus all of its descendants, depth-first."""
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        """Return the ConfigLine whose text and full ancestry match *path*."""
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        """Join the raw config lines of *section* into one text block."""
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        """Return the config section at *path* as text.

        NOTE(review): returns an empty *list* (not '') when the path does
        not exist — callers must cope with both return types.
        """
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        """Return the ConfigLine objects under *path*.

        Raises ValueError when the path is absent from the config.
        """
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more lines of configuration, creating any missing
        *parents* along the way; duplicates are skipped."""

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Return the module object via whichever factory this Ansible
    version provides: legacy get_module(), else NetworkModule."""
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the running-config wrapped in CustomNetworkConfig.

    Prefers the explicit 'config' module parameter; otherwise fetches
    from the device through either connection-object layout.
    NOTE(review): the include_defaults argument is ignored — the
    'include_defaults' module parameter is used instead.
    """
    config = module.params['config']
    if not config:
        try:
            # legacy transport objects expose get_config() directly
            config = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
+
def load_config(module, candidate):
    """Diff *candidate* against the running config and push the delta.

    Honors check mode and the 'save' parameter (copy run to start).
    Returns a dict with 'changed' and, when commands were sent, 'updates'.
    """
    config = get_config(module)

    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            # support both connection layers (legacy .configure vs .config)
            try:
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
+
def execute_commands(cmds, module, command_type=None):
    """Run commands via module.execute, falling back to module.cli.

    command_type is translated for the cli layer ('cli_show' -> 'json',
    'cli_show_ascii' -> 'text'). Fails the module on shell errors.

    Fix: in the module.cli fallback the no-command_type branch was a
    copy of the typed branch and passed output=None; the sibling
    implementation in the other migrated nxos modules sends the
    commands raw instead (raw=True), which is what an untyped command
    requires.
    """
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        # module.execute is absent: use the newer cli connection layer
        try:
            if command_type:
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                module.cli.add_commands(cmds, raw=True)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
+
+
def prepare_show_command(command, module):
    """Send *command* using the transport-appropriate call.

    For 'cli' transport the commands are sent without a command_type;
    for 'nxapi' they are sent as ascii show output.
    """
    if module.params['transport'] == 'cli':
        execute_commands(command, module)
    elif module.params['transport'] == 'nxapi':
        execute_commands(command, module, command_type='cli_show_ascii')
+
+
def checkpoint(filename, module):
    """Create a named checkpoint file on the device, suppressing the
    confirmation prompt first."""
    prepare_show_command(
        ['terminal dont-ask', 'checkpoint file %s' % filename], module)
+
+
def rollback(filename, module):
    """Roll the running config back to the checkpoint file *filename*.

    Uses module.configure() when available, else the module.cli layer;
    fails the module on shell errors from the cli path.
    """
    commands = ['rollback running-config file %s' % filename]
    try:
        module.configure(commands)
    except AttributeError:
        try:
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(clie), commands=commands)
+
+
def main():
    """Entry point: create a checkpoint file or roll back to one.

    Fix: 'changed' was set unconditionally inside the try block, so a
    run supplying neither checkpoint_file nor rollback_to reported
    changed=True (with filename=None) despite doing nothing; it is now
    set only when an operation actually ran.
    """
    argument_spec = dict(
        checkpoint_file=dict(required=False),
        rollback_to=dict(required=False),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                mutually_exclusive=[['checkpoint_file',
                                                     'rollback_to']],
                                supports_check_mode=False)

    checkpoint_file = module.params['checkpoint_file']
    rollback_to = module.params['rollback_to']

    status = None
    filename = None
    changed = False
    try:
        if checkpoint_file:
            checkpoint(checkpoint_file, module)
            status = 'checkpoint file created'
            changed = True
        elif rollback_to:
            rollback(rollback_to, module)
            status = 'rollback executed'
            changed = True
        # mutually_exclusive guarantees at most one of these is set
        filename = rollback_to or checkpoint_file
    except ShellError:
        clie = get_exception()
        module.fail_json(msg=str(clie))

    module.exit_json(changed=changed, status=status, filename=filename)


if __name__ == '__main__':
    main()
diff --git a/network/nxos/nxos_smu.py b/network/nxos/nxos_smu.py
new file mode 100644
index 00000000000..1404725ac38
--- /dev/null
+++ b/network/nxos/nxos_smu.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_smu
+version_added: "2.2"
+short_description: Perform SMUs on Cisco NX-OS devices.
+description:
+ - Perform software maintenance upgrades (SMUs) on Cisco NX-OS devices.
+extends_documentation_fragment: nxos
+author: Gabriele Gerbino (@GGabriele)
+notes:
+ - The module can only activate and commit a package,
+ not remove or deactivate it.
+ - Use C(transport=nxapi) to avoid connection timeout
+options:
+ pkg:
+ description:
+ - Name of the remote package.
+ required: true
+ file_system:
+ description:
+ - The remote file system of the device. If omitted,
+ devices that support a file_system parameter will use
+ their default values.
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+- nxos_smu:
+ pkg: "nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+file_system:
+ description: The remote file system of the device.
+ returned: always
+ type: string
+ sample: "bootflash:"
+pkg:
+ description: Name of the remote package
+ type: string
+ returned: always
+ sample: "nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["install add bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm",
+ "install activate bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm force",
+ "install commit bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import time
+import json
+import collections
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Normalize *val* to a list: sequences are copied, None becomes []."""
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass with section-path lookup and indent-aware add.

    Part of the shared "COMMON CODE FOR MIGRATION" block duplicated across
    nxos modules.
    """

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all of its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Return the item whose text and full ancestor texts match *path*.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of config items back to raw text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Render the section at *path* as text; empty list when absent.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more lines of configuration, creating missing parents.
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the module via get_module() (legacy) or NetworkModule (newer).

    Only one of the two names is importable depending on the Ansible
    version; referencing the missing one raises NameError.
    """
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device config (or the 'config' param) wrapped in
    CustomNetworkConfig."""
    contents = module.params['config']
    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            # NetworkModule path: fetch via the config proxy.
            contents = module.config.get_config(
                include_defaults=module.params['include_defaults'])
    return CustomNetworkConfig(indent=2, contents=contents)
+
def load_config(module, candidate):
    """Diff *candidate* against the device config and push the delta.

    Returns a dict with 'changed' and, when commands were generated,
    'updates'. Honors check mode and the 'save' module parameter.
    """
    config = get_config(module)

    # only lines missing from the running config are sent
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            try:
                # newer modules expose configure(); older ones config()
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
def execute_show(cmds, module, command_type=None):
    """Run show commands, preferring module.execute with a cli fallback."""
    output_map = {'cli_show': 'json', 'cli_show_ascii': 'text'}

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        err = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(err))
    except AttributeError:
        # NetworkModule path: translate nxapi command_type to cli output.
        try:
            if command_type:
                module.cli.add_commands(cmds,
                                        output=output_map.get(command_type))
            else:
                module.cli.add_commands(cmds, raw=True)
            response = module.cli.run_commands()
        except ShellError:
            err = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(err))
    return response
+
+
def execute_show_command(command, module, command_type='cli_show'):
    """Wrap *command* in a list and dispatch per the configured transport."""
    cmds = [command]
    transport = module.params['transport']
    if transport == 'cli':
        body = execute_show(cmds, module)
    elif transport == 'nxapi':
        body = execute_show(cmds, module, command_type=command_type)
    return body
+
+
def remote_file_exists(module, dst, file_system='bootflash:'):
    """Return True when *dst* is present on *file_system* of the device."""
    command = 'dir {0}/{1}'.format(file_system, dst)
    body = execute_show_command(command, module,
                                command_type='cli_show_ascii')
    return 'No such file' not in body[0]
+
+
def execute_config_command(commands, module):
    """Push config commands, falling back to the cli object on old modules.

    Returns whatever the underlying transport returned.
    """
    try:
        output = module.configure(commands)
    except ShellError:
        err = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(err), commands=commands)
    except AttributeError:
        # NetworkModule path: enter config mode explicitly.
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            output = module.cli.run_commands()
        except ShellError:
            err = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(err), commands=commands)
    return output
+
+
def apply_patch(module, commands):
    """Send each install command individually, pausing 5s between steps.

    NOTE(review): `'failed' in response` is a list-membership test, so it
    only fires when an output element equals the exact string 'failed' —
    presumably intended as a substring check of the command output;
    confirm against real device responses.
    """
    for command in commands:
        response = execute_config_command([command], module)
        # give the device time to settle between install operations
        time.sleep(5)
        if 'failed' in response:
            module.fail_json(msg="Operation failed!", response=response)
+
+
def get_commands(module, pkg, file_system):
    """Build the install add/activate/commit sequence still needed for *pkg*.

    The package name is matched with its extension stripped, against the
    device's inactive/active/committed package lists.
    """
    pkg_base = '.'.join(pkg.split('.')[0:-1])
    commands = []

    inactive = execute_show_command('show install inactive', module,
                                    command_type='cli_show_ascii')[0]
    active = execute_show_command('show install active', module,
                                  command_type='cli_show_ascii')[0]

    if pkg_base not in inactive and pkg_base not in active:
        commands.append('install add {0}{1}'.format(file_system, pkg))

    if pkg_base not in active:
        commands.append('install activate {0}{1} force'.format(
            file_system, pkg))

    committed = execute_show_command('show install committed', module,
                                     command_type='cli_show_ascii')[0]
    if pkg_base not in committed:
        commands.append('install commit {0}{1}'.format(file_system, pkg))

    return commands
+
+
def main():
    """Entry point: verify the package exists remotely, then install it."""
    argument_spec = dict(
        pkg=dict(required=True),
        file_system=dict(required=False, default='bootflash:'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    pkg = module.params['pkg']
    file_system = module.params['file_system']
    changed = False

    if not remote_file_exists(module, pkg, file_system=file_system):
        module.fail_json(msg="The requested package doesn't exist "
                             "on the device")

    commands = get_commands(module, pkg, file_system)
    if commands and not module.check_mode:
        try:
            apply_patch(module, commands)
            changed = True
        except ShellError:
            err = get_exception()
            module.fail_json(msg=str(err))

    # the cli fallback may have prepended 'configure'; hide it from the
    # reported updates
    if 'configure' in commands:
        commands.pop(0)

    module.exit_json(changed=changed,
                     pkg=pkg,
                     file_system=file_system,
                     updates=commands)


if __name__ == '__main__':
    main()
diff --git a/network/nxos/nxos_snapshot.py b/network/nxos/nxos_snapshot.py
new file mode 100644
index 00000000000..e6f367d7bfa
--- /dev/null
+++ b/network/nxos/nxos_snapshot.py
@@ -0,0 +1,674 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_snapshot
+version_added: "2.2"
+short_description: Manage snapshots of the running states of selected features.
+description:
+ - Create snapshots of the running states of selected features, add
+ new show commands for snapshot creation, delete and compare
+ existing snapshots.
+extends_documentation_fragment: nxos
+author:
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - C(transport=cli) may cause timeout errors.
+ - The C(element_key1) and C(element_key2) parameter specify the tags used
+ to distinguish among row entries. In most cases, only the element_key1
+ parameter needs to be specified to be able to distinguish among row entries.
+ - C(action=compare) will always store a comparison report on a local file.
+options:
+ action:
+ description:
+ - Define what snapshot action the module would perform.
+ required: true
+ choices: ['create','add','compare','delete','delete_all']
+ snapshot_name:
+ description:
+ - Snapshot name, to be used when C(action=create)
+ or C(action=delete).
+ required: false
+ default: null
+ description:
+ description:
+ - Snapshot description to be used when C(action=create).
+ required: false
+ default: null
+ snapshot1:
+ description:
+ - First snapshot to be used when C(action=compare).
+ required: false
+ default: null
+ snapshot2:
+ description:
+ - Second snapshot to be used when C(action=compare).
+ required: false
+ default: null
+ comparison_results_file:
+ description:
+ - Name of the file where snapshots comparison will be stored.
+ required: false
+ default: null
+ compare_option:
+ description:
+ - Snapshot options to be used when C(action=compare).
+ required: false
+ default: null
+ choices: ['summary','ipv4routes','ipv6routes']
+ section:
+ description:
+ - Used to name the show command output, to be used
+ when C(action=add).
+ required: false
+ default: null
+ show_command:
+ description:
+ - Specify a new show command, to be used when C(action=add).
+ required: false
+ default: null
+ row_id:
+ description:
+ - Specifies the tag of each row entry of the show command's
+ XML output, to be used when C(action=add).
+ required: false
+ default: null
+ element_key1:
+ description:
+ - Specify the tags used to distinguish among row entries,
+ to be used when C(action=add).
+ required: false
+ default: null
+ element_key2:
+ description:
+ - Specify the tags used to distinguish among row entries,
+ to be used when C(action=add).
+ required: false
+ default: null
+ save_snapshot_locally:
+ description:
+ - Specify to locally store a new created snapshot,
+ to be used when C(action=create).
+ required: false
+ default: false
+ choices: ['true','false']
+ path:
+ description:
+ - Specify the path of the file where new created snapshot or
+ snapshots comparison will be stored, to be used when
+ C(action=create) and C(save_snapshot_locally=true) or
+ C(action=compare).
+ required: false
+ default: './'
+'''
+
+EXAMPLES = '''
+# Create a snapshot and store it locally
+- nxos_snapshot:
+ action: create
+ snapshot_name: test_snapshot
+ description: Done with Ansible
+ save_snapshot_locally: true
+ path: /home/user/snapshots/
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# Delete a snapshot
+- nxos_snapshot:
+ action: delete
+ snapshot_name: test_snapshot
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# Delete all existing snapshots
+- nxos_snapshot:
+ action: delete_all
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# Add a show command for snapshots creation
+- nxos_snapshot:
+ section: myshow
+ show_command: show ip interface brief
+ row_id: ROW_intf
+ element_key1: intf-name
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# Compare two snapshots
+- nxos_snapshot:
+ action: compare
+ snapshot1: pre_snapshot
+ snapshot2: post_snapshot
+ comparison_results_file: compare_snapshots.txt
+ compare_option: summary
+ path: '../snapshot_reports/'
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+existing_snapshots:
+ description: list of existing snapshots.
+ returned: verbose mode
+ type: list
+ sample: [{"date": "Tue Sep 13 10:58:08 2016",
+ "description": "First snapshot", "name": "first_snap"},
+ {"date": "Tue Sep 13 10:27:31 2016", "description": "Pre-snapshot",
+ "name": "pre_snapshot"}]
+final_snapshots:
+ description: list of final snapshots.
+ returned: verbose mode
+ type: list
+ sample: [{"date": "Tue Sep 13 10:58:08 2016",
+ "description": "First snapshot", "name": "first_snap"},
+ {"date": "Tue Sep 13 10:27:31 2016", "description": "Pre-snapshot",
+ "name": "pre_snapshot"},
+ {"date": "Tue Sep 13 10:37:50 2016", "description": "Post-snapshot",
+ "name": "post_snapshot"}]
+report_file:
+ description: name of the file where the new snapshot or snapshots
+ comparison have been stored.
+ returned: verbose mode
+ type: string
+ sample: "/home/gabriele/Desktop/ntc-ansible/ansible_snapshot"
+updates:
+ description: commands sent to the device
+ returned: verbose mode
+ type: list
+ sample: ["snapshot create post_snapshot Post-snapshot"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import os
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Normalize *val* to a list: sequences are copied, None becomes []."""
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass with section-path lookup and indent-aware add.

    Part of the shared "COMMON CODE FOR MIGRATION" block duplicated across
    nxos modules.
    """

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all of its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Return the item whose text and full ancestor texts match *path*.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of config items back to raw text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Render the section at *path* as text; empty list when absent.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more lines of configuration, creating missing parents.
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the module via get_module() (legacy) or NetworkModule (newer).

    Only one of the two names is importable depending on the Ansible
    version; referencing the missing one raises NameError.
    """
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device config (or the 'config' param) wrapped in
    CustomNetworkConfig."""
    contents = module.params['config']
    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            # NetworkModule path: fetch via the config proxy.
            contents = module.config.get_config(
                include_defaults=module.params['include_defaults'])
    return CustomNetworkConfig(indent=2, contents=contents)
+
def load_config(module, candidate):
    """Diff *candidate* against the device config and push the delta.

    Returns a dict with 'changed' and, when commands were generated,
    'updates'. Honors check mode and the 'save' module parameter.
    """
    config = get_config(module)

    # only lines missing from the running config are sent
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            try:
                # newer modules expose configure(); older ones config()
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
+
def execute_show(cmds, module, command_type=None):
    """Run show commands, preferring module.execute with a cli fallback."""
    output_map = {'cli_show': 'json', 'cli_show_ascii': 'text'}

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        err = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(err))
    except AttributeError:
        # NetworkModule path: translate nxapi command_type to cli output
        # (falsy command_type is passed through unchanged, as before).
        try:
            out = output_map.get(command_type) if command_type else command_type
            module.cli.add_commands(cmds, output=out)
            response = module.cli.run_commands()
        except ShellError:
            err = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(err))
    return response
+
+
def execute_show_command(command, module, command_type='cli_show_ascii'):
    """Wrap *command* in a list and dispatch per the configured transport."""
    cmds = [command]
    transport = module.params['transport']
    if transport == 'cli':
        body = execute_show(cmds, module)
    elif transport == 'nxapi':
        body = execute_show(cmds, module, command_type=command_type)
    return body
+
+
def get_existing(module):
    """Return the snapshots currently on the device.

    Parses 'show snapshots' ASCII output into a list of dicts with
    'name', 'date' and 'description' keys; lines that don't match the
    expected layout are skipped.
    """
    existing = []
    command = 'show snapshots'

    body = execute_show_command(command, module)[0]
    if body:
        # The pattern had lost its group names ('(?P\S+)' is invalid and
        # raises re.error at match time, and the code reads
        # groupdict()['name'/'date'/'description']); restored here.
        snapshot_regex = (r'(?P<name>\S+)\s+(?P<date>\w+\s+\w+\s+\d+\s+'
                          r'\d+:\d+:\d+\s+\d+)\s+(?P<description>.*)')
        for snapshot in body.splitlines():
            match_snapshot = re.match(snapshot_regex, snapshot, re.DOTALL)
            if match_snapshot:
                # groupdict() yields exactly the three documented keys
                existing.append(match_snapshot.groupdict())

    return existing
+
+
def action_create(module, existing_snapshots):
    """Return the create command unless the named snapshot already exists."""
    name = module.params['snapshot_name']
    already_there = any(snap['name'] == name for snap in existing_snapshots)
    if already_there:
        return []
    return ['snapshot create {0} {1}'.format(
        name, module.params['description'])]
+
+
def action_add(module, existing_snapshots):
    """Return the 'snapshot section add' command when the proposed section
    is not already configured.

    Parses 'show snapshot sections' ASCII output into per-section dicts
    and compares the proposed section against them.
    """
    commands = list()
    command = 'show snapshot sections'
    sections = []
    body = execute_show_command(command, module)[0]

    if body:
        # The pattern had lost its group name ('(?P\S+)' is invalid and
        # raises re.error; the code reads groupdict()['section']); restored.
        section_regex = r'.*\[(?P<section>\S+)\].*'
        split_body = body.split('\n\n')
        for section in split_body:
            temp = {}
            for line in section.splitlines():
                try:
                    match_section = re.match(section_regex, section, re.DOTALL)
                    temp['section'] = match_section.groupdict()['section']
                except (AttributeError, KeyError):
                    pass

                if 'show command' in line:
                    temp['show_command'] = line.split('show command: ')[1]
                elif 'row id' in line:
                    temp['row_id'] = line.split('row id: ')[1]
                elif 'key1' in line:
                    temp['element_key1'] = line.split('key1: ')[1]
                elif 'key2' in line:
                    temp['element_key2'] = line.split('key2: ')[1]

            if temp:
                sections.append(temp)

    # the device reports '-' for an unset key2
    proposed = {
        'section': module.params['section'],
        'show_command': module.params['show_command'],
        'row_id': module.params['row_id'],
        'element_key1': module.params['element_key1'],
        'element_key2': module.params['element_key2'] or '-',
    }

    if proposed not in sections:
        if module.params['element_key2']:
            commands.append('snapshot section add {0} "{1}" {2} {3} {4}'.format(
                module.params['section'], module.params['show_command'],
                module.params['row_id'], module.params['element_key1'],
                module.params['element_key2']))
        else:
            commands.append('snapshot section add {0} "{1}" {2} {3}'.format(
                module.params['section'], module.params['show_command'],
                module.params['row_id'], module.params['element_key1']))

    return commands
+
+
def action_compare(module, existing_snapshots):
    """Return the raw output of comparing the two requested snapshots."""
    command = 'show snapshot compare {0} {1}'.format(
        module.params['snapshot1'], module.params['snapshot2'])

    option = module.params['compare_option']
    if option:
        command = '{0} {1}'.format(command, option)

    return execute_show_command(command, module)[0]
+
+
def action_delete(module, existing_snapshots):
    """Return the delete command when the named snapshot exists."""
    name = module.params['snapshot_name']
    if any(snap['name'] == name for snap in existing_snapshots):
        return ['snapshot delete {0}'.format(name)]
    return []
+
+
def action_delete_all(module, existing_snapshots):
    """Return the delete-all command when any snapshot exists."""
    return ['snapshot delete all'] if existing_snapshots else []
+
+
def invoke(name, *args, **kwargs):
    """Call module-level function *name* with the given args; None if absent."""
    func = globals().get(name)
    return func(*args, **kwargs) if func else None
+
+
def execute_config_command(commands, module):
    """Push config commands, falling back to the cli object on old modules."""
    try:
        module.configure(commands)
    except ShellError:
        err = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(err), commands=commands)
    except AttributeError:
        # NetworkModule path: enter config mode explicitly.
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            err = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(err), commands=commands)
+
+
def get_snapshot(module):
    """Return the dump of the snapshot named in the module params."""
    command = 'show snapshot dump {0}'.format(
        module.params['snapshot_name'])
    return execute_show_command(command, module)[0]
+
+
def write_on_file(content, filename, module):
    """Write *content* to *filename* under module.params['path'].

    Returns the full path written; fails the module on any I/O error.
    """
    path = module.params['path']
    if path[-1] != '/':
        path += '/'
    filepath = '{0}{1}'.format(path, filename)
    try:
        with open(filepath, 'w') as report:
            report.write(content)
    except:
        # deliberately broad: any write failure aborts the module run
        module.fail_json(msg="Error while writing on file.")

    return filepath
+
def main():
    """Entry point: validate per-action parameters, run the requested
    snapshot action, and report updates/report_file results.

    Fixes over the original: the compare-branch error message wrongly said
    "action=create" and omitted comparison_results_file; an unused local
    (comparison_results_file) and a dead second get_existing() call
    (end_state, never used) were removed.
    """
    argument_spec = dict(
        action=dict(required=True, choices=['create', 'add',
                                            'compare', 'delete',
                                            'delete_all']),
        snapshot_name=dict(required=False, type='str'),
        description=dict(required=False, type='str'),
        snapshot1=dict(required=False, type='str'),
        snapshot2=dict(required=False, type='str'),
        compare_option=dict(required=False,
                            choices=['summary', 'ipv4routes', 'ipv6routes']),
        comparison_results_file=dict(required=False, type='str'),
        section=dict(required=False, type='str'),
        show_command=dict(required=False, type='str'),
        row_id=dict(required=False, type='str'),
        element_key1=dict(required=False, type='str'),
        element_key2=dict(required=False, type='str'),
        save_snapshot_locally=dict(required=False, type='bool',
                                   default=False),
        path=dict(required=False, type='str', default='./')
    )
    # NOTE(review): 'delete_all'/'delete_snapshot' are not argument names,
    # so this mutually_exclusive entry is inert; kept for interface
    # stability — confirm the intended constraint.
    module = get_network_module(argument_spec=argument_spec,
                                mutually_exclusive=[['delete_all',
                                                     'delete_snapshot']],
                                supports_check_mode=True)

    action = module.params['action']

    CREATE_PARAMS = ['snapshot_name', 'description']
    ADD_PARAMS = ['section', 'show_command', 'row_id', 'element_key1']
    COMPARE_PARAMS = ['snapshot1', 'snapshot2', 'comparison_results_file']

    if not os.path.isdir(module.params['path']):
        module.fail_json(msg='{0} is not a valid directory name.'.format(
            module.params['path']))

    if action == 'create':
        for param in CREATE_PARAMS:
            if not module.params[param]:
                module.fail_json(msg='snapshot_name and description are '
                                     'required when action=create')
    elif action == 'add':
        for param in ADD_PARAMS:
            if not module.params[param]:
                module.fail_json(msg='section, show_command, row_id '
                                     'and element_key1 are required '
                                     'when action=add')
    elif action == 'compare':
        for param in COMPARE_PARAMS:
            if not module.params[param]:
                module.fail_json(msg='snapshot1, snapshot2 and '
                                     'comparison_results_file are '
                                     'required when action=compare')
    elif action == 'delete' and not module.params['snapshot_name']:
        module.fail_json(msg='snapshot_name is required when action=delete')

    existing_snapshots = invoke('get_existing', module)
    final_snapshots = existing_snapshots
    changed = False

    action_results = invoke('action_%s' % action, module, existing_snapshots)

    result = {}
    written_file = ''
    if module.check_mode and action != 'compare':
        module.exit_json(changed=True, commands=action_results)
    else:
        if action == 'compare':
            # compare never changes device state; it only writes the report
            written_file = write_on_file(
                action_results,
                module.params['comparison_results_file'],
                module)
            result['updates'] = []
        else:
            if action_results:
                execute_config_command(action_results, module)
                changed = True
                final_snapshots = invoke('get_existing', module)
                result['updates'] = action_results

            if (action == 'create' and
                    module.params['save_snapshot_locally']):
                snapshot = get_snapshot(module)
                written_file = write_on_file(
                    snapshot, module.params['snapshot_name'], module)

    result['connected'] = module.connected
    result['changed'] = changed
    if module._verbosity > 0:
        result['final_snapshots'] = final_snapshots
        result['existing_snapshots'] = existing_snapshots
        if written_file:
            result['report_file'] = written_file

    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/network/nxos/nxos_snmp_community.py b/network/nxos/nxos_snmp_community.py
new file mode 100644
index 00000000000..93ed3f7be4b
--- /dev/null
+++ b/network/nxos/nxos_snmp_community.py
@@ -0,0 +1,503 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_snmp_community
+version_added: "2.2"
+short_description: Manages SNMP community configs.
+description:
+ - Manages SNMP community configuration.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+options:
+ community:
+ description:
+ - Case-sensitive community string.
+ required: true
+ access:
+ description:
+ - Access type for community.
+ required: false
+ default: null
+ choices: ['ro','rw']
+ group:
+ description:
+ - Group to which the community belongs.
+ required: false
+ default: null
+ acl:
+ description:
+ - ACL name to filter snmp requests.
+ required: false
+ default: 1
+ state:
+ description:
+ - Manage the state of the resource.
+ required: true
+ default: present
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+# ensure snmp community is configured
+- nxos_snmp_community:
+ community: TESTING7
+ group: network-operator
+ state: present
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"group": "network-operator"}
+existing:
+ description: k/v pairs of existing snmp community
+ type: dict
+ sample: {}
+end_state:
+ description: k/v pairs of snmp community after module execution
+ returned: always
+ type: dict or null
+ sample: {"acl": "None", "group": "network-operator"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["snmp-server community TESTING7 group network-operator"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+    # Normalize *val* to a list: copy sequences, wrap a scalar in a
+    # one-element list, and map None to an empty list.
+    if isinstance(val, (list, tuple)):
+        return list(val)
+    elif val is not None:
+        return [val]
+    else:
+        return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+    # Per-module extension of NetworkConfig carried in each migrated nxos
+    # module ("COMMON CODE FOR MIGRATION") until the shared implementation
+    # provides these helpers.
+
+    def expand_section(self, configobj, S=None):
+        # Depth-first collection of a config line and all of its children.
+        if S is None:
+            S = list()
+        S.append(configobj)
+        for child in configobj.children:
+            if child in S:
+                continue
+            self.expand_section(child, S)
+        return S
+
+    def get_object(self, path):
+        # Return the config line whose text AND full ancestry match *path*;
+        # implicitly returns None when no line matches.
+        for item in self.items:
+            if item.text == path[-1]:
+                parents = [p.text for p in item.parents]
+                if parents == path[:-1]:
+                    return item
+
+    def to_block(self, section):
+        # Render a list of config objects back to raw config text.
+        return '\n'.join([item.raw for item in section])
+
+    def get_section(self, path):
+        # Section at *path* as text; empty list when the path is absent.
+        try:
+            section = self.get_section_objects(path)
+            return self.to_block(section)
+        except ValueError:
+            return list()
+
+    def get_section_objects(self, path):
+        if not isinstance(path, list):
+            path = [path]
+        obj = self.get_object(path)
+        if not obj:
+            raise ValueError('path does not exist in config')
+        return self.expand_section(obj)
+
+
+    def add(self, lines, parents=None):
+        """Adds one or lines of configuration
+        """
+
+        ancestors = list()
+        offset = 0
+        obj = None
+
+        ## global config command
+        if not parents:
+            for line in to_list(lines):
+                item = ConfigLine(line)
+                item.raw = line
+                if item not in self.items:
+                    self.items.append(item)
+
+        else:
+            for index, p in enumerate(parents):
+                try:
+                    i = index + 1
+                    obj = self.get_section_objects(parents[:i])[0]
+                    ancestors.append(obj)
+
+                except ValueError:
+                    # add parent to config
+                    offset = index * self.indent
+                    obj = ConfigLine(p)
+                    obj.raw = p.rjust(len(p) + offset)
+                    if ancestors:
+                        obj.parents = list(ancestors)
+                        ancestors[-1].children.append(obj)
+                    self.items.append(obj)
+                    ancestors.append(obj)
+
+            # add child objects
+            for line in to_list(lines):
+                # check if child already exists
+                for child in ancestors[-1].children:
+                    if child.text == line:
+                        break
+                else:
+                    offset = len(parents) * self.indent
+                    item = ConfigLine(line)
+                    item.raw = line.rjust(len(line) + offset)
+                    item.parents = ancestors
+                    ancestors[-1].children.append(item)
+                    self.items.append(item)
+
+
+def get_network_module(**kwargs):
+    # Prefer the legacy get_module() factory; fall back to NetworkModule
+    # when get_module was not importable (NameError at call time).
+    try:
+        return get_module(**kwargs)
+    except NameError:
+        return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+    # Use the user-supplied 'config' parameter when present; otherwise pull
+    # the running config from the device, trying the legacy get_config()
+    # API first and falling back to the newer module.config API.
+    config = module.params['config']
+    if not config:
+        try:
+            config = module.get_config()
+        except AttributeError:
+            defaults = module.params['include_defaults']
+            config = module.config.get_config(include_defaults=defaults)
+    return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+    # Diff *candidate* against the running config and push only the missing
+    # lines; optionally saves run->start when the 'save' parameter is set.
+    # Returns {'changed': bool, 'updates': [...]}.
+    config = get_config(module)
+
+    commands = candidate.difference(config)
+    commands = [str(c).strip() for c in commands]
+
+    save_config = module.params['save']
+
+    result = dict(changed=False)
+
+    if commands:
+        if not module.check_mode:
+            try:
+                module.configure(commands)
+            except AttributeError:
+                # newer module_utils expose config() instead of configure()
+                module.config(commands)
+
+            if save_config:
+                try:
+                    module.config.save_config()
+                except AttributeError:
+                    module.execute(['copy running-config startup-config'])
+
+        # changed is reported even in check mode once a diff exists
+        result['changed'] = True
+        result['updates'] = commands
+
+    return result
+# END OF COMMON CODE
+
+
+def execute_config_command(commands, module):
+    # Send config commands via the legacy configure() API; when that API is
+    # absent (AttributeError), fall back to the cli connection.
+    # NOTE: the fallback mutates the caller's *commands* list in place by
+    # prepending 'configure' -- callers check for and pop that entry.
+    try:
+        module.configure(commands)
+    except ShellError:
+        clie = get_exception()
+        module.fail_json(msg='Error sending CLI commands',
+                         error=str(clie), commands=commands)
+    except AttributeError:
+        try:
+            commands.insert(0, 'configure')
+            module.cli.add_commands(commands, output='config')
+            module.cli.run_commands()
+        except ShellError:
+            clie = get_exception()
+            module.fail_json(msg='Error sending CLI commands',
+                             error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh(command, response, module):
+    """Get response for when transport=cli. This is kind of a hack and mainly
+    needed because these modules were originally written for NX-API. And
+    not every command supports "| json" when using cli/ssh. As such, we assume
+    if | json returns an XML string, it is a valid command, but that the
+    resource doesn't exist yet. Instead, the output will be a raw string
+    when issuing commands containing 'show run'.
+    """
+    if 'xml' in response[0]:
+        # '| json' came back as XML: valid command, resource not configured
+        body = []
+    elif 'show run' in command:
+        # raw running-config text is returned as-is
+        body = response
+    else:
+        try:
+            body = [json.loads(response[0])]
+        except ValueError:
+            module.fail_json(msg='Command does not support JSON output',
+                             command=command)
+    return body
+
+
+def execute_show(cmds, module, command_type=None):
+    # Run show commands through the legacy execute() API, falling back to
+    # the cli connection when execute() is absent (AttributeError).
+    # Maps legacy NX-API command types onto cli output formats.
+    command_type_map = {
+        'cli_show': 'json',
+        'cli_show_ascii': 'text'
+    }
+
+    try:
+        if command_type:
+            response = module.execute(cmds, command_type=command_type)
+        else:
+            response = module.execute(cmds)
+    except ShellError:
+        clie = get_exception()
+        module.fail_json(msg='Error sending {0}'.format(cmds),
+                         error=str(clie))
+    except AttributeError:
+        try:
+            if command_type:
+                command_type = command_type_map.get(command_type)
+                module.cli.add_commands(cmds, output=command_type)
+                response = module.cli.run_commands()
+            else:
+                # no type given: pass the commands through untouched
+                module.cli.add_commands(cmds, raw=True)
+                response = module.cli.run_commands()
+        except ShellError:
+            clie = get_exception()
+            module.fail_json(msg='Error sending {0}'.format(cmds),
+                             error=str(clie))
+    return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+    # Dispatch a show command per transport. For cli, append '| json'
+    # (except for 'show run', which stays raw text) and decode the reply.
+    # NOTE(review): if transport is neither 'cli' nor 'nxapi', 'body' is
+    # never bound and the return raises UnboundLocalError -- assumed
+    # unreachable because the shared nxos argument spec restricts the
+    # transport choices; confirm against module_utils.
+    if module.params['transport'] == 'cli':
+        if 'show run' not in command:
+            command += ' | json'
+        cmds = [command]
+        response = execute_show(cmds, module)
+        body = get_cli_body_ssh(command, response, module)
+    elif module.params['transport'] == 'nxapi':
+        cmds = [command]
+        body = execute_show(cmds, module, command_type=command_type)
+
+    return body
+
+
+def apply_key_map(key_map, table):
+    # Rename the keys of *table* via *key_map*. Keys absent from the map
+    # are dropped; truthy values are stringified, falsy values kept as-is.
+    new_dict = {}
+    for key, value in table.items():
+        new_key = key_map.get(key)
+        if new_key:
+            value = table.get(key)
+            if value:
+                new_dict[new_key] = str(value)
+            else:
+                new_dict[new_key] = value
+    return new_dict
+
+
+def flatten_list(command_lists):
+    # Flatten one level: [[a, b], c] -> [a, b, c].
+    flat_command_list = []
+    for command in command_lists:
+        if isinstance(command, list):
+            flat_command_list.extend(command)
+        else:
+            flat_command_list.append(command)
+    return flat_command_list
+
+
+def get_snmp_groups(module):
+    # Return the list of SNMP group (role) names configured on the device,
+    # parsed from 'show snmp group'.
+    command = 'show snmp group'
+    data = execute_show_command(command, module)[0]
+
+    group_list = []
+
+    try:
+        group_table = data['TABLE_role']['ROW_role']
+        for group in group_table:
+            group_list.append(group['role_name'])
+    except (KeyError, AttributeError):
+        # unexpected/absent structure: report whatever was collected so far
+        return group_list
+
+    return group_list
+
+
+def get_snmp_community(module, find_filter=None):
+ command = 'show snmp community'
+ data = execute_show_command(command, module)[0]
+
+ community_dict = {}
+
+ community_map = {
+ 'grouporaccess': 'group',
+ 'aclfilter': 'acl'
+ }
+
+ try:
+ community_table = data['TABLE_snmp_community']['ROW_snmp_community']
+ for each in community_table:
+ community = apply_key_map(community_map, each)
+ key = each['community_name']
+ community_dict[key] = community
+ except (KeyError, AttributeError):
+ return community_dict
+
+ if find_filter:
+ find = community_dict.get(find_filter, None)
+
+ if find_filter is None or find is None:
+ return {}
+ else:
+ fix_find = {}
+ for (key, value) in find.iteritems():
+ if isinstance(value, str):
+ fix_find[key] = value.strip()
+ else:
+ fix_find[key] = value
+ return fix_find
+
+
+def config_snmp_community(delta, community):
+ CMDS = {
+ 'group': 'snmp-server community {0} group {group}',
+ 'acl': 'snmp-server community {0} use-acl {acl}'
+ }
+ commands = []
+ for k, v in delta.iteritems():
+ cmd = CMDS.get(k).format(community, **delta)
+ if cmd:
+ commands.append(cmd)
+ cmd = None
+ return commands
+
+
+def main():
+ argument_spec = dict(
+ community=dict(required=True, type='str'),
+ access=dict(choices=['ro', 'rw']),
+ group=dict(type='str'),
+ acl=dict(type='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ required_one_of=[['access', 'group']],
+ mutually_exclusive=[['access', 'group']],
+ supports_check_mode=True)
+
+ access = module.params['access']
+ group = module.params['group']
+ community = module.params['community']
+ acl = module.params['acl']
+ state = module.params['state']
+
+ if access:
+ if access == 'ro':
+ group = 'network-operator'
+ elif access == 'rw':
+ group = 'network-admin'
+
+ # group check - ensure group being configured exists on the device
+ configured_groups = get_snmp_groups(module)
+
+ if group not in configured_groups:
+ module.fail_json(msg="group not on switch."
+ "please add before moving forward")
+
+ existing = get_snmp_community(module, community)
+ args = dict(group=group, acl=acl)
+ proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
+ delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
+
+ changed = False
+ end_state = existing
+ commands = []
+
+ if state == 'absent':
+ if existing:
+ command = "no snmp-server community {0}".format(community)
+ commands.append(command)
+ cmds = flatten_list(commands)
+ elif state == 'present':
+ if delta:
+ command = config_snmp_community(dict(delta), community)
+ commands.append(command)
+ cmds = flatten_list(commands)
+
+ if cmds:
+ if module.check_mode:
+ module.exit_json(changed=True, commands=cmds)
+ else:
+ changed = True
+ execute_config_command(cmds, module)
+ end_state = get_snmp_community(module, community)
+ if 'configure' in cmds:
+ cmds.pop(0)
+
+ results = {}
+ results['proposed'] = proposed
+ results['existing'] = existing
+ results['end_state'] = end_state
+ results['updates'] = cmds
+ results['changed'] = changed
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_snmp_contact.py b/network/nxos/nxos_snmp_contact.py
new file mode 100644
index 00000000000..b5b97da92c1
--- /dev/null
+++ b/network/nxos/nxos_snmp_contact.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_snmp_contact
+version_added: "2.2"
+short_description: Manages SNMP contact info.
+description:
+ - Manages SNMP contact information.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - C(state=absent) removes the contact configuration if it is configured.
+options:
+ contact:
+ description:
+ - Contact information.
+ required: true
+ state:
+ description:
+ - Manage the state of the resource.
+ required: true
+ default: present
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+# ensure snmp contact is configured
+- nxos_snmp_contact:
+ contact: Test
+ state: present
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"contact": "New_Test"}
+existing:
+ description: k/v pairs of existing snmp contact
+ type: dict
+ sample: {"contact": "Test"}
+end_state:
+ description: k/v pairs of snmp contact after module execution
+ returned: always
+ type: dict
+ sample: {"contact": "New_Test"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["snmp-server contact New_Test"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh(command, response, module):
+ """Get response for when transport=cli. This is kind of a hack and mainly
+ needed because these modules were originally written for NX-API. And
+ not every command supports "| json" when using cli/ssh. As such, we assume
+ if | json returns an XML string, it is a valid command, but that the
+ resource doesn't exist yet. Instead, the output will be a raw string
+ when issuing commands containing 'show run'.
+ """
+ if 'xml' in response[0]:
+ body = []
+ elif 'show run' in command:
+ body = response
+ else:
+ try:
+ body = [json.loads(response[0])]
+ except ValueError:
+ module.fail_json(msg='Command does not support JSON output',
+ command=command)
+ return body
+
+
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+ if module.params['transport'] == 'cli':
+ if 'show run' not in command:
+ command += ' | json'
+ cmds = [command]
+ response = execute_show(cmds, module)
+ body = get_cli_body_ssh(command, response, module)
+ elif module.params['transport'] == 'nxapi':
+ cmds = [command]
+ body = execute_show(cmds, module, command_type=command_type)
+
+ return body
+
+
+def flatten_list(command_lists):
+ flat_command_list = []
+ for command in command_lists:
+ if isinstance(command, list):
+ flat_command_list.extend(command)
+ else:
+ flat_command_list.append(command)
+ return flat_command_list
+
+
+def get_snmp_contact(module):
+ contact = {}
+ contact_regex = '.*snmp-server\scontact\s(?P\S+).*'
+ command = 'show run snmp'
+
+ body = execute_show_command(command, module, command_type='cli_show_ascii')[0]
+
+ try:
+ match_contact = re.match(contact_regex, body, re.DOTALL)
+ group_contact = match_contact.groupdict()
+ contact['contact'] = group_contact["contact"]
+ except AttributeError:
+ contact = {}
+
+ return contact
+
+
+def main():
+    # Entry point: converge the single snmp-server contact value.
+    argument_spec = dict(
+        contact=dict(required=True, type='str'),
+        state=dict(choices=['absent', 'present'],
+                   default='present')
+    )
+    module = get_network_module(argument_spec=argument_spec,
+                                supports_check_mode=True)
+
+    contact = module.params['contact']
+    state = module.params['state']
+
+    existing = get_snmp_contact(module)
+    changed = False
+    proposed = dict(contact=contact)
+    end_state = existing
+    commands = []
+
+    if state == 'absent':
+        # only remove when the configured contact matches the requested one
+        if existing and existing['contact'] == contact:
+            commands.append('no snmp-server contact')
+    elif state == 'present':
+        if not existing or existing['contact'] != contact:
+            commands.append('snmp-server contact {0}'.format(contact))
+
+    cmds = flatten_list(commands)
+    if cmds:
+        if module.check_mode:
+            module.exit_json(changed=True, commands=cmds)
+        else:
+            changed = True
+            execute_config_command(cmds, module)
+            end_state = get_snmp_contact(module)
+            # execute_config_command may have prefixed 'configure' in place;
+            # strip it from the reported updates
+            if 'configure' in cmds:
+                cmds.pop(0)
+
+    results = {}
+    results['proposed'] = proposed
+    results['existing'] = existing
+    results['end_state'] = end_state
+    results['updates'] = cmds
+    results['changed'] = changed
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_snmp_host.py b/network/nxos/nxos_snmp_host.py
new file mode 100644
index 00000000000..366da22a918
--- /dev/null
+++ b/network/nxos/nxos_snmp_host.py
@@ -0,0 +1,642 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_snmp_host
+version_added: "2.2"
+short_description: Manages SNMP host configuration.
+description:
+ - Manages SNMP host configuration parameters.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - C(state=absent) removes the host configuration if it is configured.
+options:
+ snmp_host:
+ description:
+ - IP address of hostname of target host.
+ required: true
+ version:
+ description:
+ - SNMP version.
+ required: false
+ default: v2c
+ choices: ['v2c', 'v3']
+ community:
+ description:
+ - Community string or v3 username.
+ required: false
+ default: null
+ udp:
+ description:
+ - UDP port number (0-65535).
+ required: false
+ default: null
+ type:
+ description:
+ - type of message to send to host.
+ required: false
+ default: traps
+ choices: ['trap', 'inform']
+ vrf:
+ description:
+ - VRF to use to source traffic to source.
+ required: false
+ default: null
+ vrf_filter:
+ description:
+ - Name of VRF to filter.
+ required: false
+ default: null
+ src_intf:
+ description:
+ - Source interface.
+ required: false
+ default: null
+ state:
+ description:
+ - Manage the state of the resource.
+ required: true
+ default: present
+ choices: ['present','absent']
+
+'''
+
+EXAMPLES = '''
+# ensure snmp host is configured
+- nxos_snmp_host:
+ snmp_host: 3.3.3.3
+ community: TESTING
+ state: present
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"community": "TESTING", "snmp_host": "3.3.3.3",
+ "snmp_type": "trap", "version": "v2c", "vrf_filter": "one_more_vrf"}
+existing:
+ description: k/v pairs of existing snmp host
+ type: dict
+ sample: {"community": "TESTING", "snmp_type": "trap",
+ "udp": "162", "v3": "noauth", "version": "v2c",
+ "vrf": "test_vrf", "vrf_filter": ["test_vrf",
+ "another_test_vrf"]}
+end_state:
+ description: k/v pairs of switchport after module execution
+ returned: always
+ type: dict or null
+ sample: {"community": "TESTING", "snmp_type": "trap",
+ "udp": "162", "v3": "noauth", "version": "v2c",
+ "vrf": "test_vrf", "vrf_filter": ["test_vrf",
+ "another_test_vrf", "one_more_vrf"]}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["snmp-server host 3.3.3.3 filter-vrf another_test_vrf"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh(command, response, module):
+ """Get response for when transport=cli. This is kind of a hack and mainly
+ needed because these modules were originally written for NX-API. And
+ not every command supports "| json" when using cli/ssh. As such, we assume
+ if | json returns an XML string, it is a valid command, but that the
+ resource doesn't exist yet. Instead, the output will be a raw string
+ when issuing commands containing 'show run'.
+ """
+ if 'xml' in response[0]:
+ body = []
+ elif 'show run' in command:
+ body = response
+ else:
+ try:
+ body = [json.loads(response[0])]
+ except ValueError:
+ module.fail_json(msg='Command does not support JSON output',
+ command=command)
+ return body
+
+
def execute_show(cmds, module, command_type=None):
    """Run show commands and return the raw response, aborting on ShellError.

    Prefers the legacy execute() interface; on AttributeError falls back to
    the NetworkModule CLI interface, mapping the NX-API command type to the
    equivalent CLI output format.
    """
    out_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    def abort(exc):
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(exc))

    try:
        if command_type:
            return module.execute(cmds, command_type=command_type)
        return module.execute(cmds)
    except ShellError:
        abort(get_exception())
    except AttributeError:
        try:
            if command_type:
                module.cli.add_commands(cmds, output=out_map.get(command_type))
            else:
                # No type given: request the raw response.
                module.cli.add_commands(cmds, raw=True)
            return module.cli.run_commands()
        except ShellError:
            abort(get_exception())
+
+
def execute_show_command(command, module, command_type='cli_show'):
    """Dispatch a show command over the active transport and return its body."""
    transport = module.params['transport']
    if transport == 'cli':
        # Plain CLI: ask for JSON unless this is a 'show run' variant.
        if 'show run' not in command:
            command += ' | json'
        response = execute_show([command], module)
        body = get_cli_body_ssh(command, response, module)
    elif transport == 'nxapi':
        body = execute_show([command], module, command_type=command_type)

    return body
+
+
def apply_key_map(key_map, table):
    """Rename *table*'s keys per *key_map*, dropping unmapped keys.

    Truthy values are stringified; falsy values are kept as-is.
    """
    mapped = {}
    for old_key, value in table.items():
        new_key = key_map.get(old_key)
        if not new_key:
            continue
        mapped[new_key] = str(value) if value else value
    return mapped
+
+
def get_snmp_host(host, module):
    """Return the configured SNMP host entry for *host* as a dict, or {}.

    Parses 'show snmp host' output; src_intf/vrf/vrf_filter values arrive as
    'label:value' strings and are split accordingly.
    """
    command = 'show snmp host'
    body = execute_show_command(command, module)

    host_map = {
        'port': 'udp',
        'version': 'version',
        'level': 'v3',
        'type': 'snmp_type',
        'secname': 'community'
    }

    resource = {}

    if body:
        try:
            resource_table = body[0]['TABLE_host']['ROW_host']

            # A single host comes back as a dict, several as a list.
            if isinstance(resource_table, dict):
                resource_table = [resource_table]

            for each in resource_table:
                key = str(each['host'])
                src = each.get('src_intf', None)
                host_resource = apply_key_map(host_map, each)

                if src:
                    host_resource['src_intf'] = src.split(':')[1].strip()

                vrf_filt = each.get('TABLE_vrf_filters', None)
                if vrf_filt:
                    vrf_filter = vrf_filt['ROW_vrf_filters']['vrf_filter'].split(':')[1].split(',')
                    filters = [vrf.strip() for vrf in vrf_filter]
                    host_resource['vrf_filter'] = filters

                vrf = each.get('vrf', None)
                if vrf:
                    host_resource['vrf'] = vrf.split(':')[1].strip()
                resource[key] = host_resource

        except (KeyError, AttributeError, TypeError):
            # Unexpected table shape: return whatever was parsed so far.
            return resource

        find = resource.get(host, None)

        if find:
            fix_find = {}
            # items() works on both py2 and py3 (iteritems() is py2-only).
            for (key, value) in find.items():
                if isinstance(value, str):
                    fix_find[key] = value.strip()
                else:
                    fix_find[key] = value
            return fix_find
        else:
            return {}
    else:
        return {}
+
+
def remove_snmp_host(host, existing):
    """Build the command list that deletes an existing SNMP host entry.

    Mutates existing['version'] to the CLI token ('3'/'2c') as a side effect,
    matching the original behavior relied on by the caller's result dict.
    Returns [] when the stored version is unrecognized (the original raised
    NameError because `command` was never bound).
    """
    commands = []
    command = None
    if existing.get('version') == 'v3':
        existing['version'] = '3'
        # Single-space join: the old backslash continuation leaked a run of
        # indent spaces into the middle of the CLI command.
        command = ('no snmp-server host {0} {snmp_type} version '
                   '{version} {v3} {community}'.format(host, **existing))

    elif existing.get('version') == 'v2c':
        existing['version'] = '2c'
        command = ('no snmp-server host {0} {snmp_type} version '
                   '{version} {community}'.format(host, **existing))

    if command:
        commands.append(command)
    return commands
+
+
def config_snmp_host(delta, proposed, existing, module):
    """Build the commands that create/update an SNMP host from *delta*.

    The base 'snmp-server host' command is only emitted when type/version/
    v3-level/community changed; vrf/vrf_filter/udp/src_intf each get their
    own command.
    """
    commands = []
    command_builder = []
    host = proposed['snmp_host']
    cmd = 'snmp-server host {0}'.format(proposed['snmp_host'])

    snmp_type = delta.get('snmp_type', None)
    version = delta.get('version', None)
    ver = delta.get('v3', None)
    community = delta.get('community', None)

    command_builder.append(cmd)
    if any([snmp_type, version, ver, community]):
        type_string = snmp_type or existing.get('type')
        if type_string:
            command_builder.append(type_string)

        version = version or existing.get('version')
        if version:
            # Guard: only known versions map to a CLI token; previously an
            # unexpected value left `vn` unbound (UnboundLocalError).
            vn = None
            if version == 'v2c':
                vn = '2c'
            elif version == 'v3':
                vn = '3'
            if vn:
                version_string = 'version {0}'.format(vn)
                command_builder.append(version_string)

        if ver:
            ver_string = ver or existing.get('v3')
            command_builder.append(ver_string)

        if community:
            community_string = community or existing.get('community')
            command_builder.append(community_string)

        cmd = ' '.join(command_builder)

        commands.append(cmd)

    CMDS = {
        'vrf_filter': 'snmp-server host {0} filter-vrf {vrf_filter}',
        'vrf': 'snmp-server host {0} use-vrf {vrf}',
        'udp': 'snmp-server host {0} udp-port {udp}',
        'src_intf': 'snmp-server host {0} source-interface {src_intf}'
    }

    # items() works on both py2 and py3 (iteritems() is py2-only).
    for key, value in delta.items():
        if key in ['vrf_filter', 'vrf', 'udp', 'src_intf']:
            command = CMDS.get(key, None)
            if command:
                cmd = command.format(host, **delta)
                commands.append(cmd)
                cmd = None
    return commands
+
+
def flatten_list(command_lists):
    """Flatten one level of nesting: sublists are expanded, scalars kept."""
    flat = []
    for item in command_lists:
        flat.extend(item if isinstance(item, list) else [item])
    return flat
+
+
def main():
    """Entry point: reconcile the desired SNMP host config with the device."""
    argument_spec = dict(
        snmp_host=dict(required=True, type='str'),
        community=dict(type='str'),
        udp=dict(type='str'),
        version=dict(choices=['v2c', 'v3'], default='v2c'),
        src_intf=dict(type='str'),
        v3=dict(choices=['noauth', 'auth', 'priv']),
        vrf_filter=dict(type='str'),
        vrf=dict(type='str'),
        snmp_type=dict(choices=['trap', 'inform'], default='trap'),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    snmp_host = module.params['snmp_host']
    community = module.params['community']
    udp = module.params['udp']
    version = module.params['version']
    src_intf = module.params['src_intf']
    v3 = module.params['v3']
    vrf_filter = module.params['vrf_filter']
    vrf = module.params['vrf']
    snmp_type = module.params['snmp_type']

    state = module.params['state']

    # Parameter cross-validation.
    if snmp_type == 'inform' and version != 'v3':
        module.fail_json(msg='inform requires snmp v3')

    if version == 'v2c' and v3:
        module.fail_json(msg='param: "v3" should not be used when '
                             'using version v2c')

    if not any([vrf_filter, vrf, udp, src_intf]):
        if not all([snmp_type, version, community]):
            module.fail_json(msg='when not configuring options like '
                                 'vrf_filter, vrf, udp, and src_intf,'
                                 'the following params are required: '
                                 'type, version, community')

    if version == 'v3' and v3 is None:
        module.fail_json(msg='when using version=v3, the param v3 '
                             '(options: auth, noauth, priv) is also required')

    existing = get_snmp_host(snmp_host, module)

    # existing returns the list of vrfs configured for a given host;
    # collapse it to a scalar so the set-based delta below stays hashable.
    store = existing.get('vrf_filter', None)
    if existing and store:
        if vrf_filter not in existing['vrf_filter']:
            existing['vrf_filter'] = None
        else:
            existing['vrf_filter'] = vrf_filter

    args = dict(
        community=community,
        snmp_host=snmp_host,
        udp=udp,
        version=version,
        src_intf=src_intf,
        vrf_filter=vrf_filter,
        v3=v3,
        vrf=vrf,
        snmp_type=snmp_type
    )

    # items() works on both py2 and py3 (iteritems() is py2-only and made
    # this module crash with AttributeError under python3).
    proposed = dict((k, v) for k, v in args.items() if v is not None)

    delta = dict(set(proposed.items()).difference(existing.items()))

    changed = False
    commands = []
    end_state = existing

    if state == 'absent':
        if existing:
            command = remove_snmp_host(snmp_host, existing)
            commands.append(command)
    elif state == 'present':
        if delta:
            command = config_snmp_host(delta, proposed, existing, module)
            commands.append(command)

    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report the would-be commands without applying them.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            execute_config_command(cmds, module)
            end_state = get_snmp_host(snmp_host, module)
            # Drop the 'configure' terminal command injected by the CLI path.
            if 'configure' in cmds:
                cmds.pop(0)

    # Restore the full vrf_filter list for reporting.
    if store:
        existing['vrf_filter'] = store

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed

    module.exit_json(**results)
diff --git a/network/nxos/nxos_snmp_location.py b/network/nxos/nxos_snmp_location.py
new file mode 100644
index 00000000000..e3b90973e72
--- /dev/null
+++ b/network/nxos/nxos_snmp_location.py
@@ -0,0 +1,422 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_snmp_location
+version_added: "2.2"
+short_description: Manages SNMP location information.
+description:
+ - Manages SNMP location configuration.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+options:
+ location:
+ description:
+ - Location information.
+ required: true
+ state:
+ description:
+ - Manage the state of the resource.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+# ensure snmp location is configured
+- nxos_snmp_location:
+ location: Test
+ state: present
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# ensure snmp location is not configured
+- nxos_snmp_location:
+ location: Test
+ state: absent
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"location": "New_Test"}
+existing:
+ description: k/v pairs of existing snmp location
+ type: dict
+ sample: {"location": "Test"}
+end_state:
+ description: k/v pairs of location info after module execution
+ returned: always
+ type: dict or null
+ sample: {"location": "New_Test"}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["snmp-server location New_Test"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Coerce *val* to a list: sequences are copied, None becomes []."""
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass with section expansion and hierarchical add().

    Part of the boilerplate shared by migrated nxos modules; it operates on
    the ConfigLine items maintained by the base NetworkConfig class.
    """

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all of its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Find the ConfigLine whose text and full ancestry match *path*;
        # implicitly returns None when nothing matches.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of ConfigLine objects as one raw-text block.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Text of the section at *path*; empty list when the path is absent.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Like get_object() but returns the object plus its whole subtree.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Adds one or lines of configuration
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Return a network module object, preferring the legacy get_module().

    When the ImportError fallback above bound NetworkModule instead of
    get_module, referencing get_module raises NameError and we use
    NetworkModule.
    """
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device running-config wrapped in CustomNetworkConfig.

    NOTE(review): the include_defaults parameter is unused here; the flag is
    read from module.params in the fallback path instead — confirm intended.
    """
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            # NetworkModule interface exposes config.get_config() instead.
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
+
def load_config(module, candidate):
    """Diff *candidate* against the running config and push the delta.

    Returns a dict with 'changed' and, when commands were generated,
    'updates'. Honors check mode (no device writes) and the 'save' param.
    """
    config = get_config(module)

    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            # Older module objects expose configure(); newer ones config().
            try:
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
+
def execute_config_command(commands, module):
    """Push config-mode commands to the device, aborting the module on failure.

    Tries the legacy get_module() interface first; on AttributeError falls
    back to the NetworkModule CLI interface (which needs an explicit
    'configure' prefix).
    """
    def abort(exc):
        module.fail_json(msg='Error sending CLI commands',
                         error=str(exc), commands=commands)

    try:
        module.configure(commands)
    except ShellError:
        abort(get_exception())
    except AttributeError:
        # NetworkModule-style object: enter config mode explicitly.
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            abort(get_exception())
+
+
def get_cli_body_ssh(command, response, module):
    """Get response for when transport=cli. This is kind of a hack and mainly
    needed because these modules were originally written for NX-API. And
    not every command supports "| json" when using cli/ssh. As such, we assume
    if | json returns an XML string, it is a valid command, but that the
    resource doesn't exist yet. Instead, the output will be a raw string
    when issuing commands containing 'show run'.
    """
    first = response[0]
    if 'xml' in first:
        # XML came back: valid command, but the resource is not configured.
        return []
    if 'show run' in command:
        # Running-config output stays a raw string.
        return response
    try:
        return [json.loads(first)]
    except ValueError:
        module.fail_json(msg='Command does not support JSON output',
                         command=command)
+
+
def execute_show(cmds, module, command_type=None):
    """Run show commands and return the raw response, aborting on ShellError.

    Prefers the legacy execute() interface; on AttributeError falls back to
    the NetworkModule CLI interface, mapping the NX-API command type to the
    equivalent CLI output format.
    """
    out_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    def abort(exc):
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(exc))

    try:
        if command_type:
            return module.execute(cmds, command_type=command_type)
        return module.execute(cmds)
    except ShellError:
        abort(get_exception())
    except AttributeError:
        try:
            if command_type:
                module.cli.add_commands(cmds, output=out_map.get(command_type))
            else:
                # No type given: request the raw response.
                module.cli.add_commands(cmds, raw=True)
            return module.cli.run_commands()
        except ShellError:
            abort(get_exception())
+
+
def execute_show_command(command, module, command_type='cli_show'):
    """Dispatch a show command over the active transport and return its body."""
    transport = module.params['transport']
    if transport == 'cli':
        # Plain CLI: ask for JSON unless this is a 'show run' variant.
        if 'show run' not in command:
            command += ' | json'
        response = execute_show([command], module)
        body = get_cli_body_ssh(command, response, module)
    elif transport == 'nxapi':
        body = execute_show([command], module, command_type=command_type)

    return body
+
+
def apply_key_map(key_map, table):
    """Rename *table*'s keys per *key_map*, dropping unmapped keys.

    Truthy values are stringified; falsy values are kept as-is.
    """
    mapped = {}
    for old_key, value in table.items():
        new_key = key_map.get(old_key)
        if not new_key:
            continue
        mapped[new_key] = str(value) if value else value
    return mapped
+
+
def flatten_list(command_lists):
    """Flatten one level of nesting: sublists are expanded, scalars kept."""
    flat = []
    for item in command_lists:
        flat.extend(item if isinstance(item, list) else [item])
    return flat
+
+
def get_snmp_location(module):
    """Return {'location': <value>} parsed from 'show run snmp', or {}."""
    location = {}
    # The named group '(?P<location>...)' was missing its name, making the
    # pattern invalid and re.match raise at runtime; restored here. Raw
    # string avoids relying on '\s' surviving as a literal escape.
    location_regex = r'.*snmp-server\slocation\s(?P<location>\S+).*'
    command = 'show run snmp'

    body = execute_show_command(command, module, command_type='cli_show_ascii')
    try:
        match_location = re.match(location_regex, body[0], re.DOTALL)
        group_location = match_location.groupdict()
        location['location'] = group_location["location"]
    except (AttributeError, TypeError):
        # No match (AttributeError on None) or non-string body.
        location = {}

    return location
+
+
def main():
    """Entry point: ensure the SNMP location matches the requested state."""
    argument_spec = dict(
        location=dict(required=True, type='str'),
        state=dict(choices=['absent', 'present'],
                   default='present')
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)


    location = module.params['location']
    state = module.params['state']

    existing = get_snmp_location(module)
    changed = False
    commands = []
    proposed = dict(location=location)
    end_state = existing

    if state == 'absent':
        # Only remove when the device's location matches the requested one.
        if existing and existing['location'] == location:
            commands.append('no snmp-server location')
    elif state == 'present':
        if not existing or existing['location'] != location:
            commands.append('snmp-server location {0}'.format(location))

    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report the would-be commands without applying them.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            execute_config_command(cmds, module)
            end_state = get_snmp_location(module)
            # Drop the 'configure' terminal command injected by the CLI path.
            if 'configure' in cmds:
                cmds.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed

    module.exit_json(**results)
+
+
+from ansible.module_utils.basic import *
+if __name__ == "__main__":
+ main()
diff --git a/network/nxos/nxos_snmp_traps.py b/network/nxos/nxos_snmp_traps.py
new file mode 100644
index 00000000000..632e8e8d1d7
--- /dev/null
+++ b/network/nxos/nxos_snmp_traps.py
@@ -0,0 +1,499 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_snmp_trap
+version_added: "2.2"
+short_description: Manages SNMP traps.
+description:
+ - Manages SNMP traps configurations.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+notes:
+ - This module works at the group level for traps. If you need to only
+ enable/disable 1 specific trap within a group, use the M(nxos_command)
+ module.
+ - Be aware that you can set a trap only for an enabled feature.
+options:
+ group:
+ description:
+ - Case sensitive group.
+ required: true
+ choices: ['aaa', 'bridge', 'callhome', 'cfs', 'config', 'entity',
+ 'feature-control', 'hsrp', 'license', 'link', 'lldp', 'ospf', 'pim',
+ 'rf', 'rmon', 'snmp', 'storm-control', 'stpx', 'sysmgr', 'system',
+ 'upgrade', 'vtp', 'all']
+ state:
+ description:
+ - Manage the state of the resource.
+ required: false
+ default: enabled
+ choices: ['enabled','disabled']
+'''
+
+EXAMPLES = '''
+# ensure lldp trap configured
+- nxos_snmp_traps:
+ group: lldp
+ state: enabled
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# ensure lldp trap is not configured
+- nxos_snmp_traps:
+ group: lldp
+ state: disabled
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"group": "lldp"}
+existing:
+ description: k/v pairs of existing trap status
+ type: dict
+ sample: {"lldp": [{"enabled": "No",
+ "trap": "lldpRemTablesChange"}]}
+end_state:
+ description: k/v pairs of trap info after module execution
+ returned: always
+ type: dict
+ sample: {"lldp": [{"enabled": "Yes",
+ "trap": "lldpRemTablesChange"}]}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: "snmp-server enable traps lldp ;"
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Coerce *val* to a list: sequences are copied, None becomes []."""
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass with section expansion and hierarchical add().

    Part of the boilerplate shared by migrated nxos modules; it operates on
    the ConfigLine items maintained by the base NetworkConfig class.
    """

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all of its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Find the ConfigLine whose text and full ancestry match *path*;
        # implicitly returns None when nothing matches.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of ConfigLine objects as one raw-text block.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Text of the section at *path*; empty list when the path is absent.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Like get_object() but returns the object plus its whole subtree.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Adds one or lines of configuration
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Return a network module object, preferring the legacy get_module().

    When the ImportError fallback above bound NetworkModule instead of
    get_module, referencing get_module raises NameError and we use
    NetworkModule.
    """
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the device running-config wrapped in CustomNetworkConfig.

    NOTE(review): the include_defaults parameter is unused here; the flag is
    read from module.params in the fallback path instead — confirm intended.
    """
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            # NetworkModule interface exposes config.get_config() instead.
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
+
def load_config(module, candidate):
    """Diff *candidate* against the running config and push the delta.

    Returns a dict with 'changed' and, when commands were generated,
    'updates'. Honors check mode (no device writes) and the 'save' param.
    """
    config = get_config(module)

    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            # Older module objects expose configure(); newer ones config().
            try:
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
+
def execute_config_command(commands, module):
    """Push config-mode commands to the device, aborting the module on failure.

    Tries the legacy get_module() interface first; on AttributeError falls
    back to the NetworkModule CLI interface (which needs an explicit
    'configure' prefix).
    """
    def abort(exc):
        module.fail_json(msg='Error sending CLI commands',
                         error=str(exc), commands=commands)

    try:
        module.configure(commands)
    except ShellError:
        abort(get_exception())
    except AttributeError:
        # NetworkModule-style object: enter config mode explicitly.
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            abort(get_exception())
+
+
def get_cli_body_ssh(command, response, module):
    """Get response for when transport=cli. This is kind of a hack and mainly
    needed because these modules were originally written for NX-API. And
    not every command supports "| json" when using cli/ssh. As such, we assume
    if | json returns an XML string, it is a valid command, but that the
    resource doesn't exist yet. Instead, the output will be a raw string
    when issuing commands containing 'show run'.
    """
    first = response[0]
    if 'xml' in first:
        # XML came back: valid command, but the resource is not configured.
        return []
    if 'show run' in command:
        # Running-config output stays a raw string.
        return response
    try:
        return [json.loads(first)]
    except ValueError:
        module.fail_json(msg='Command does not support JSON output',
                         command=command)
+
+
def execute_show(cmds, module, command_type=None):
    """Run show commands and return the raw response, aborting on ShellError.

    Prefers the legacy execute() interface; on AttributeError falls back to
    the NetworkModule CLI interface, mapping the NX-API command type to the
    equivalent CLI output format.
    """
    out_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    def abort(exc):
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(exc))

    try:
        if command_type:
            return module.execute(cmds, command_type=command_type)
        return module.execute(cmds)
    except ShellError:
        abort(get_exception())
    except AttributeError:
        try:
            if command_type:
                module.cli.add_commands(cmds, output=out_map.get(command_type))
            else:
                # No type given: request the raw response.
                module.cli.add_commands(cmds, raw=True)
            return module.cli.run_commands()
        except ShellError:
            abort(get_exception())
+
+
def execute_show_command(command, module, command_type='cli_show'):
    """Dispatch a show command over the active transport and return its body."""
    transport = module.params['transport']
    if transport == 'cli':
        # Plain CLI: ask for JSON unless this is a 'show run' variant.
        if 'show run' not in command:
            command += ' | json'
        response = execute_show([command], module)
        body = get_cli_body_ssh(command, response, module)
    elif transport == 'nxapi':
        body = execute_show([command], module, command_type=command_type)

    return body
+
+
def apply_key_map(key_map, table):
    """Rename *table*'s keys per *key_map*, dropping unmapped keys.

    Truthy values are stringified; falsy values are kept as-is.
    """
    mapped = {}
    for old_key, value in table.items():
        new_key = key_map.get(old_key)
        if not new_key:
            continue
        mapped[new_key] = str(value) if value else value
    return mapped
+
+
def flatten_list(command_lists):
    """Flatten one level of nesting: sublists are expanded, scalars kept."""
    flat = []
    for item in command_lists:
        flat.extend(item if isinstance(item, list) else [item])
    return flat
+
+
+
def get_snmp_traps(group, module):
    """Return trap status keyed by feature from 'show snmp trap'.

    Returns the full table for group 'all', {group: [...]} for a known
    feature, or {} when the feature is currently disabled.
    """
    command = 'show snmp trap'
    body = execute_show_command(command, module)

    trap_key = {
        'description': 'trap',
        'isEnabled': 'enabled'
    }

    resource = {}

    try:
        resource_table = body[0]['TABLE_snmp_trap']['ROW_snmp_trap']

        # A single row comes back as a dict; normalize to a list so the loop
        # below works either way (previously a dict raised an uncaught
        # TypeError — the sibling get_snmp_host already handled this case).
        if isinstance(resource_table, dict):
            resource_table = [resource_table]

        for each_feature in ['aaa', 'bridge', 'callhome', 'cfs', 'config',
                             'entity', 'feature-control', 'hsrp', 'license',
                             'link', 'lldp', 'ospf', 'pim', 'rf', 'rmon',
                             'snmp', 'storm-control', 'stpx', 'sysmgr',
                             'system', 'upgrade', 'vtp']:

            resource[each_feature] = []

        for each_resource in resource_table:
            key = str(each_resource['trap_type'])
            mapped_trap = apply_key_map(trap_key, each_resource)

            if key != 'Generic':
                resource[key].append(mapped_trap)

    except (KeyError, AttributeError, TypeError):
        # Unexpected table shape: return whatever was parsed so far.
        return resource

    find = resource.get(group, None)

    # group is lower-cased by main(), so a plain comparison suffices
    # (the old "'all'.lower()" was a no-op).
    if group == 'all':
        return resource
    elif find:
        trap_resource = {group: resource[group]}
        return trap_resource
    else:
        # if 'find' is None, it means that 'group' is a
        # currently disabled feature.
        return {}
+
+
def get_trap_commands(group, state, existing, module):
    """Compute the enable/disable trap commands needed to reach *state*.

    For 'all', emits one (possibly empty) command list per feature whose
    traps differ from the target state; otherwise a single command for the
    given group, or fail_json when the group's feature is not enabled.
    """
    commands = []

    if group == 'all':
        # Toggle every feature with at least one trap in the wrong state.
        if state == 'disabled':
            current, template = 'Yes', 'no snmp-server enable traps {0}'
        else:
            current, template = 'No', 'snmp-server enable traps {0}'
        for feature in existing:
            feature_cmds = [template.format(feature)
                            for trap in existing[feature]
                            if trap['enabled'] == current]
            commands.append(list(set(feature_cmds)))

    else:
        enabled = False
        disabled = False

        if group in existing:
            for each_trap in existing[group]:
                status = each_trap['enabled'].lower()
                if status == 'yes':
                    enabled = True
                if status == 'no':
                    disabled = True

            if state == 'disabled' and enabled:
                commands.append(['no snmp-server enable traps {0}'.format(group)])
            elif state == 'enabled' and disabled:
                commands.append(['snmp-server enable traps {0}'.format(group)])
        else:
            module.fail_json(msg='{0} is not a currently '
                             'enabled feature.'.format(group))

    return commands
+
+
def main():
    """Entry point: enable/disable SNMP trap groups to match the request."""
    argument_spec = dict(
        state=dict(choices=['enabled', 'disabled'], default='enabled'),
        group=dict(choices=['aaa', 'bridge', 'callhome', 'cfs', 'config',
                            'entity', 'feature-control', 'hsrp',
                            'license', 'link', 'lldp', 'ospf', 'pim', 'rf',
                            'rmon', 'snmp', 'storm-control', 'stpx',
                            'sysmgr', 'system', 'upgrade', 'vtp', 'all'],
                   required=True),
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    # Group names are case sensitive on the device; normalize user input.
    group = module.params['group'].lower()
    state = module.params['state']

    existing = get_snmp_traps(group, module)
    proposed = {'group': group}

    changed = False
    end_state = existing
    commands = get_trap_commands(group, state, existing, module)

    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report the would-be commands without applying them.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            execute_config_command(cmds, module)
            end_state = get_snmp_traps(group, module)
            # Drop the 'configure' terminal command injected by the CLI path.
            if 'configure' in cmds:
                cmds.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed

    module.exit_json(**results)
diff --git a/network/nxos/nxos_snmp_user.py b/network/nxos/nxos_snmp_user.py
new file mode 100644
index 00000000000..a06a0151019
--- /dev/null
+++ b/network/nxos/nxos_snmp_user.py
@@ -0,0 +1,561 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_snmp_user
+version_added: "2.2"
+short_description: Manages SNMP users for monitoring.
+description:
+ - Manages SNMP user configuration.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+notes:
+ - Authentication parameters not idempotent.
+options:
+ user:
+ description:
+ - Name of the user.
+ required: true
+ group:
+ description:
+ - Group to which the user will belong to.
+ required: true
+ auth:
+ description:
+ - Auth parameters for the user.
+ required: false
+ default: null
+ choices: ['md5', 'sha']
+ pwd:
+ description:
+ - Auth password when using md5 or sha.
+ required: false
+ default: null
+ privacy:
+ description:
+ - Privacy password for the user.
+ required: false
+ default: null
+ encrypt:
+ description:
+ - Enables AES-128 bit encryption when using privacy password.
+ required: false
+ default: null
+ choices: ['true','false']
+ state:
+ description:
+ - Manage the state of the resource.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+- nxos_snmp_user:
+ user: ntc
+ group: network-operator
+ auth: md5
+ pwd: test_password
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"authentication": "md5", "group": "network-operator",
+ "pwd": "test_password", "user": "ntc"}
+existing:
+ description:
+ - k/v pairs of existing configuration
+ type: dict
+ sample: {"authentication": "no", "encrypt": "none",
+ "group": ["network-operator"], "user": "ntc"}
+end_state:
+  description: k/v pairs of snmp user configuration after module execution
+ returned: always
+ type: dict
+ sample: {"authentication": "md5", "encrypt": "none",
+ "group": ["network-operator"], "user": "ntc"}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["snmp-server user ntc network-operator auth md5 test_password"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Return *val* as a list: lists/tuples are copied, None becomes an
    empty list, and any other single value is wrapped in a list."""
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig extended with section expansion and hierarchical add.

    Used by the migrated nxos modules to diff candidate config against the
    device running-config.
    """

    def expand_section(self, configobj, S=None):
        """Return *configobj* plus all of its descendants, depth-first."""
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        """Return the ConfigLine whose text and ancestry match *path*
        (a list of line texts from outermost parent to the line itself),
        or None when no such line exists."""
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        """Join the raw text of a list of ConfigLines into one string."""
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        """Return the config section at *path* as a string, or an empty
        list when the path does not exist."""
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        """Return the ConfigLine objects for the section at *path*; raises
        ValueError when the path is not present in the config."""
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Adds one or lines of configuration
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            # Walk/create each parent level so the new lines nest correctly.
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the network module via whichever factory this Ansible version
    exposes: the legacy get_module(), or NetworkModule as a fallback."""
    try:
        module = get_module(**kwargs)
    except NameError:
        # get_module is not defined on newer Ansible versions.
        module = NetworkModule(**kwargs)
    return module
+
def get_config(module, include_defaults=False):
    """Return the device configuration wrapped in a CustomNetworkConfig.

    Prefers the user-supplied 'config' module parameter; otherwise fetches
    the running-config via whichever module API is available.
    """
    contents = module.params['config']
    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            # Newer module object: config lives behind module.config.
            contents = module.config.get_config(
                include_defaults=module.params['include_defaults'])
    return CustomNetworkConfig(indent=2, contents=contents)
+
def load_config(module, candidate):
    """Diff *candidate* against the device config and push any changes.

    Returns a result dict with 'changed' and, when commands were sent,
    'updates'. Optionally saves run->start when the 'save' param is set.
    """
    running = get_config(module)
    updates = [str(cmd).strip() for cmd in candidate.difference(running)]
    save_config = module.params['save']

    result = dict(changed=False)
    if not updates:
        return result

    if not module.check_mode:
        try:
            module.configure(updates)
        except AttributeError:
            module.config(updates)

        if save_config:
            try:
                module.config.save_config()
            except AttributeError:
                module.execute(['copy running-config startup-config'])

    result['changed'] = True
    result['updates'] = updates
    return result
+# END OF COMMON CODE
+
+
def execute_config_command(commands, module):
    """Push config *commands*, preferring module.configure() and falling
    back to the low-level CLI interface on older connections."""
    def _abort():
        # Report the shell failure together with the offending commands.
        err = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(err), commands=commands)

    try:
        module.configure(commands)
    except ShellError:
        _abort()
    except AttributeError:
        # No configure() on this connection object: drive the CLI directly.
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            _abort()
+
+
def get_cli_body_ssh(command, response, module, text=False):
    """Normalize CLI (ssh transport) output.

    These modules were written for NX-API, and '| json' is not supported by
    every command over ssh. When '| json' yields an XML string (or a bare
    newline), the command was valid but the resource does not exist yet, so
    an empty body is returned. 'show run' and text-mode requests pass
    through raw; everything else is parsed as JSON.
    """
    first = response[0]
    if 'xml' in first or first == '\n':
        return []
    if text or 'show run' in command:
        return response
    try:
        return [json.loads(first)]
    except ValueError:
        module.fail_json(msg='Command does not support JSON output',
                         command=command)
+
+
def execute_show(cmds, module, command_type=None):
    """Run show *cmds* via module.execute(), falling back to the raw CLI
    interface on older connections; fails the module on shell errors."""
    # Map NX-API command types to the output format the CLI layer expects.
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    def _abort():
        err = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(err))

    try:
        if command_type:
            return module.execute(cmds, command_type=command_type)
        return module.execute(cmds)
    except ShellError:
        _abort()
    except AttributeError:
        # No execute() on this connection object: use module.cli instead.
        try:
            if command_type:
                module.cli.add_commands(
                    cmds, output=command_type_map.get(command_type))
            else:
                module.cli.add_commands(cmds, raw=True)
            return module.cli.run_commands()
        except ShellError:
            _abort()
+
+
def execute_show_command(command, module, command_type='cli_show', text=False):
    """Run a show command over the active transport and return the parsed
    body (list). Only the 'cli' and 'nxapi' transports are handled."""
    transport = module.params['transport']
    if transport == 'cli':
        # Over ssh we request JSON explicitly, except for 'show run' and
        # plain-text requests, which must stay raw.
        if text is False and 'show run' not in command:
            command += ' | json'
        response = execute_show([command], module)
        body = get_cli_body_ssh(command, response, module, text=text)
    elif transport == 'nxapi':
        body = execute_show([command], module, command_type=command_type)

    # NOTE(review): any other transport leaves body unbound (NameError),
    # matching the original behavior; transport choices are validated
    # upstream by the shared nxos argument spec.
    return body
+
+
def flatten_list(command_lists):
    """Flatten one level of nesting: list elements are spliced in place,
    scalar elements are appended as-is."""
    flattened = []
    for element in command_lists:
        if isinstance(element, list):
            flattened += element
        else:
            flattened.append(element)
    return flattened
+
+
def get_snmp_groups(module):
    """Return the list of SNMP group (role) names configured on the device."""
    body = execute_show_command('show snmp group', module)

    groups = []
    try:
        for row in body[0]['TABLE_role']['ROW_role']:
            groups.append(row['role_name'])
    except (KeyError, AttributeError, IndexError):
        # Unparseable/empty output: return whatever was collected so far.
        pass
    return groups
+
+
def get_snmp_user(user, module):
    """Return the existing SNMP user configuration as a dict.

    Keys when found: 'user', 'authentication', 'encrypt' ('aes-128' or
    'none'), and 'group' (list of group names). Returns an empty dict when
    the user does not exist or the output cannot be parsed.
    """
    command = 'show snmp user {0}'.format(user)
    # Probe in text mode first: '| json' fails for a nonexistent user, so
    # only re-run for structured output when the entry exists.
    body = execute_show_command(command, module, text=True)

    if 'No such entry' not in body[0]:
        body = execute_show_command(command, module)

    resource = {}
    group_list = []
    try:
        # NOTE(review): key names below follow the NX-OS 'show snmp user'
        # JSON schema (TABLE_snmp_users/ROW_snmp_users) — confirm per
        # platform/firmware version.
        resource_table = body[0]['TABLE_snmp_users']['ROW_snmp_users']
        resource['user'] = str(resource_table['user'])
        resource['authentication'] = str(resource_table['auth']).strip()
        encrypt = str(resource_table['priv']).strip()
        if encrypt.startswith('aes'):
            resource['encrypt'] = 'aes-128'
        else:
            resource['encrypt'] = 'none'

        group_table = resource_table['TABLE_groups']['ROW_groups']

        groups = []
        try:
            # Multiple groups come back as a list of row dicts...
            for group in group_table:
                groups.append(str(group['group']).strip())
        except TypeError:
            # ...while a single group comes back as one bare dict.
            groups.append(str(group_table['group']).strip())

        resource['group'] = groups

    except (KeyError, AttributeError, IndexError, TypeError):
        # Missing user or unexpected response shape: report what we have.
        return resource

    return resource
+
+
def remove_snmp_user(user):
    """Build the command list that deletes SNMP *user* from the device."""
    command = 'no snmp-server user {0}'.format(user)
    return [command]
+
+
def config_snmp_user(proposed, user, reset, new):
    """Build the snmp-server user command(s) for the *proposed* settings.

    When *reset* is set for an already-existing user (not *new*), a removal
    command is emitted first so the user can be recreated with the new
    privacy/encryption settings.
    """
    commands = remove_snmp_user(user) if (reset and not new) else []

    cmd = ''
    if proposed.get('group'):
        cmd = 'snmp-server user {0} {group}'.format(user, **proposed)

    if proposed.get('authentication') and proposed.get('pwd'):
        cmd += ' auth {authentication} {pwd}'.format(**proposed)

    has_privacy = proposed.get('privacy')
    if proposed.get('encrypt') and has_privacy:
        cmd += ' priv {encrypt} {privacy}'.format(**proposed)
    elif has_privacy:
        cmd += ' priv {privacy}'.format(**proposed)

    if cmd:
        commands.append(cmd)
    return commands
+
+
def main():
    """Entry point: create, update, or remove an SNMP user on the device."""
    argument_spec = dict(
        user=dict(required=True, type='str'),
        group=dict(type='str', required=True),
        pwd=dict(type='str'),
        privacy=dict(type='str'),
        authentication=dict(choices=['md5', 'sha']),
        encrypt=dict(type='bool'),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    module = get_network_module(argument_spec=argument_spec,
                                required_together=[['authentication', 'pwd'],
                                                   ['encrypt', 'privacy']],
                                supports_check_mode=True)

    user = module.params['user']
    group = module.params['group']
    pwd = module.params['pwd']
    privacy = module.params['privacy']
    encrypt = module.params['encrypt']
    authentication = module.params['authentication']
    state = module.params['state']

    if privacy and encrypt:
        # BUGFIX: the original tested `not pwd and authentication`, which
        # only failed when pwd was missing but authentication was supplied.
        # Per the error message, BOTH must be present to use encrypt.
        if not (pwd and authentication):
            module.fail_json(msg='pwd and authentication must be provided '
                                 'when using privacy and encrypt')

    if group and group not in get_snmp_groups(module):
        module.fail_json(msg='group not configured yet on switch.')

    existing = get_snmp_user(user, module)
    end_state = existing

    # Remember the real group list so it can be restored in the results
    # after the group field is temporarily collapsed for the delta check.
    store = existing.get('group', None)
    if existing:
        if group not in existing['group']:
            existing['group'] = None
        else:
            existing['group'] = group

    changed = False
    commands = []
    proposed = {}

    if state == 'absent' and existing:
        commands.append(remove_snmp_user(user))

    elif state == 'present':
        new = False
        reset = False

        args = dict(user=user, pwd=pwd, group=group, privacy=privacy,
                    encrypt=encrypt, authentication=authentication)
        # items() instead of iteritems() keeps this Python 3 compatible.
        proposed = dict((k, v) for k, v in args.items() if v is not None)

        if not existing:
            if encrypt:
                proposed['encrypt'] = 'aes-128'
            commands.append(config_snmp_user(proposed, user, reset, new))

        elif existing:
            if encrypt and not existing['encrypt'].startswith('aes'):
                # Privacy encryption cannot be changed in place: the user
                # must be removed and re-created.
                reset = True
                proposed['encrypt'] = 'aes-128'
            elif encrypt:
                proposed['encrypt'] = 'aes-128'

            delta = dict(
                set(proposed.items()).difference(existing.items()))

            if delta.get('pwd'):
                delta['authentication'] = authentication

            if delta:
                delta['group'] = group

            command = config_snmp_user(delta, user, reset, new)
            commands.append(command)

    cmds = flatten_list(commands)
    results = {}
    if cmds:
        if module.check_mode:
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            execute_config_command(cmds, module)
            end_state = get_snmp_user(user, module)
            # Hide the 'configure' the CLI transport may have prepended.
            if 'configure' in cmds:
                cmds.pop(0)

    if store:
        existing['group'] = store

    results['proposed'] = proposed
    results['existing'] = existing
    results['updates'] = cmds
    results['changed'] = changed
    results['end_state'] = end_state

    module.exit_json(**results)


if __name__ == "__main__":
    main()
diff --git a/network/nxos/nxos_static_route.py b/network/nxos/nxos_static_route.py
new file mode 100644
index 00000000000..2ff042d0307
--- /dev/null
+++ b/network/nxos/nxos_static_route.py
@@ -0,0 +1,472 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_static_route
+version_added: "2.2"
+short_description: Manages static route configuration
+description:
+ - Manages static route configuration
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - If no vrf is supplied, vrf is set to default.
+ - If C(state=absent), the route will be removed, regardless of the
+ non-required parameters.
+options:
+ prefix:
+ description:
+ - Destination prefix of static route.
+ required: true
+ next_hop:
+ description:
+ - Next hop address or interface of static route.
+ If interface, it must be the fully-qualified interface name.
+ required: true
+ vrf:
+ description:
+ - VRF for static route.
+ required: false
+ default: default
+ tag:
+ description:
+ - Route tag value (numeric).
+ required: false
+ default: null
+ route_name:
+ description:
+ - Name of the route. Used with the name parameter on the CLI.
+ required: false
+ default: null
+ pref:
+ description:
+ - Preference or administrative difference of route (range 1-255).
+ required: false
+ default: null
+ state:
+ description:
+ - Manage the state of the resource.
+ required: true
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+- nxos_static_route:
+ prefix: "192.168.20.64/24"
+ next_hop: "3.3.3.3"
+ route_name: testing
+ pref: 100
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"next_hop": "3.3.3.3", "pref": "100",
+ "prefix": "192.168.20.64/24", "route_name": "testing",
+ "vrf": "default"}
+existing:
+ description: k/v pairs of existing configuration
+ returned: verbose mode
+ type: dict
+ sample: {}
+end_state:
+ description: k/v pairs of configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"next_hop": "3.3.3.3", "pref": "100",
+ "prefix": "192.168.20.0/24", "route_name": "testing",
+ "tag": null}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["ip route 192.168.20.0/24 3.3.3.3 name testing 100"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+# COMMON CODE FOR MIGRATION
+import re
+
+import ansible.module_utils.nxos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine, dumps
+from ansible.module_utils.network import NetworkModule
+
+
def to_list(val):
    """Coerce *val* to a list; None maps to an empty list, scalars are
    wrapped, lists/tuples are copied."""
    if isinstance(val, (list, tuple)):
        return list(val)
    return [] if val is None else [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig extended with section expansion and hierarchical add.

    Used by the migrated nxos modules to build and diff candidate config.
    """

    def expand_section(self, configobj, S=None):
        """Return *configobj* plus all of its descendants, depth-first."""
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        """Return the ConfigLine whose text and ancestry match *path*
        (a list of line texts from outermost parent to the line itself),
        or None when no such line exists."""
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        """Join the raw text of a list of ConfigLines into one string."""
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        """Return the config section at *path* as a string, or an empty
        list when the path does not exist."""
        try:
            section = self.get_section_objects(path)
            # junos configs are rendered per-line rather than as a block.
            if self._device_os == 'junos':
                return dumps(section, output='lines')
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        """Return the ConfigLine objects for the section at *path*; raises
        ValueError when the path is not present in the config."""
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Adds one or lines of configuration
        """

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            # Walk/create each parent level so the new lines nest correctly.
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the network module object.

    Tries the legacy get_module() factory first. In this file get_module is
    never imported, so the NameError fallback to NetworkModule is the path
    normally taken.
    """
    try:
        module = get_module(**kwargs)
    except NameError:
        module = NetworkModule(**kwargs)
    return module
+
def get_config(module, include_defaults=False):
    """Return the device configuration wrapped in a CustomNetworkConfig.

    The user-supplied 'config' parameter takes precedence; otherwise the
    running-config is fetched via whichever module API is available.
    """
    contents = module.params['config']
    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            # Newer module object: config access lives behind module.config.
            contents = module.config.get_config(
                include_defaults=module.params['include_defaults'])
    return CustomNetworkConfig(indent=2, contents=contents)
+
def load_config(module, candidate):
    """Push the difference between *candidate* and the device config.

    Returns a dict with 'changed' and, when commands were pushed,
    'updates'. Saves run->start when the 'save' parameter is set.
    """
    running = get_config(module)
    updates = [str(cmd).strip() for cmd in candidate.difference(running)]
    save_config = module.params['save']

    result = dict(changed=False)
    if not updates:
        return result

    if not module.check_mode:
        try:
            module.configure(updates)
        except AttributeError:
            module.config(updates)

        if save_config:
            try:
                module.config.save_config()
            except AttributeError:
                module.execute(['copy running-config startup-config'])

    result['changed'] = True
    result['updates'] = updates
    return result
+# END OF COMMON CODE
+
+
def invoke(name, *args, **kwargs):
    """Look *name* up in module globals and call it with the given
    arguments; silently returns None when no such function exists."""
    func = globals().get(name)
    return func(*args, **kwargs) if func else None
+
+
def state_present(module, candidate, prefix):
    """Add the route command for *prefix* to *candidate*, nested under the
    vrf context section when a non-default vrf is in use."""
    commands = list()
    invoke('set_route', module, commands, prefix)
    if not commands:
        return
    vrf = module.params['vrf']
    if vrf == 'default':
        parents = []
    else:
        parents = ['vrf context {0}'.format(vrf)]
    candidate.add(commands, parents=parents)
+
+
def state_absent(module, candidate, prefix):
    """Queue removal of the static route for *prefix* into *candidate*,
    but only when the route is actually present in the device config."""
    netcfg = get_config(module)
    commands = list()
    parents = 'vrf context {0}'.format(module.params['vrf'])
    # commands[0] becomes the full 'ip route ...' line to look for.
    invoke('set_route', module, commands, prefix)
    if module.params['vrf'] == 'default':
        # Global route: remove only when the exact line exists top-level.
        config = netcfg.get_section(commands[0])
        if config:
            invoke('remove_route', module, commands, config, prefix)
            candidate.add(commands, parents=[])
    else:
        # VRF route: search inside the 'vrf context' section, stripping
        # the indentation the section text carries.
        config = netcfg.get_section(parents)
        splitted_config = config.split('\n')
        splitted_config = map(str.strip, splitted_config)
        if commands[0] in splitted_config:
            invoke('remove_route', module, commands, config, prefix)
            candidate.add(commands, parents=[parents])
+
+
def fix_prefix_to_regex(prefix):
    """Escape the '.' and '/' in an IP prefix so it can be embedded
    literally inside a regular expression."""
    escaped = prefix.replace('.', r'\.')
    return escaped.replace('/', r'\/')
+
+
def get_existing(module, prefix, warnings):
    """Parse the running config for an existing static route to *prefix*.

    Returns a dict with next_hop/route_name/tag/pref/prefix/vrf keys, or an
    empty dict when nothing matches. Appends a warning (once) when the
    requested vrf section does not exist.
    """
    key_map = ['tag', 'pref', 'route_name', 'next_hop']
    netcfg = get_config(module)
    parents = 'vrf context {0}'.format(module.params['vrf'])
    prefix_to_regex = fix_prefix_to_regex(prefix)

    # BUGFIX: the group names were missing from the pattern ('(?P\S+)'),
    # which is a regex syntax error at runtime. Restore the named groups
    # that the key_map lookup below expects.
    route_regex = ('.*ip\sroute\s{0}\s(?P<next_hop>\S+)'
                   '(\sname\s(?P<route_name>\S+))?'
                   '(\stag\s(?P<tag>\d+))?'
                   '(\s(?P<pref>\d+)).*'.format(prefix_to_regex))

    if module.params['vrf'] == 'default':
        config = str(netcfg)
    else:
        config = netcfg.get_section(parents)

    if config:
        try:
            match_route = re.match(route_regex, config, re.DOTALL)
            group_route = match_route.groupdict()

            # Backfill any key the regex did not capture so callers can
            # rely on a stable set of keys.
            for key in key_map:
                if key not in group_route:
                    group_route[key] = ''
            group_route['prefix'] = prefix
            group_route['vrf'] = module.params['vrf']
        except (AttributeError, TypeError):
            # No match (match_route is None): route is not configured.
            group_route = {}
    else:
        group_route = {}
        msg = ("VRF {0} didn't exist.".format(module.params['vrf']))
        if msg not in warnings:
            warnings.append(msg)

    return group_route
+
+
def remove_route(module, commands, config, prefix):
    """Append the 'no ip route' command for *prefix* to *commands*
    (*config* is unused; kept for signature parity with the invoker)."""
    next_hop = module.params['next_hop']
    commands.append('no ip route {0} {1}'.format(prefix, next_hop))
+
+
def set_route(module, commands, prefix):
    """Append the 'ip route' command for *prefix* to *commands*, including
    whichever optional name/tag/preference parameters were supplied."""
    params = module.params
    parts = ['ip route {0} {1}'.format(prefix, params['next_hop'])]
    if params['route_name']:
        parts.append('name {0}'.format(params['route_name']))
    if params['tag']:
        parts.append('tag {0}'.format(params['tag']))
    if params['pref']:
        parts.append('{0}'.format(params['pref']))
    commands.append(' '.join(parts))
+
+
def get_dotted_mask(mask):
    """Convert a prefix length (e.g. 24) to dotted-quad form ('255.255.255.0').

    Py3-compat fix: the original used xrange(), which does not exist on
    Python 3; range() behaves identically here.
    """
    bits = 0
    for i in range(32 - mask, 32):
        bits |= (1 << i)
    return "%d.%d.%d.%d" % ((bits & 0xff000000) >> 24,
                            (bits & 0xff0000) >> 16,
                            (bits & 0xff00) >> 8,
                            (bits & 0xff))
+
+
def get_network_start(address, netmask):
    """AND each octet of *address* with *netmask* and return the network
    address as a list of four octet strings."""
    addr = address.split('.')
    mask = netmask.split('.')
    # Fixed 0..3 indexing (not zip) preserves IndexError on short input.
    return [str(int(addr[i]) & int(mask[i])) for i in range(4)]
+
+
def network_from_string(address, mask, module):
    """Validate *address*/*mask* and return the dotted network base address.

    Fails the module on malformed input. Robustness fix: the original only
    rejected addresses with MORE than four octets; fewer than four slipped
    through and later crashed with an IndexError in get_network_start, so
    both cases now fail cleanly with the same message.
    """
    octets = address.split('.')

    if len(octets) != 4:
        module.fail_json(msg='Incorrect address format.', address=address)

    for octet in octets:
        try:
            if not 0 <= int(octet) <= 255:
                module.fail_json(msg='Address may contain invalid values.',
                                 address=address)
        except ValueError:
            module.fail_json(msg='Address may contain non-integer values.',
                             address=address)

    try:
        if not 0 <= int(mask) <= 32:
            module.fail_json(msg='Incorrect mask value.', mask=mask)
    except ValueError:
        module.fail_json(msg='Mask may contain non-integer values.', mask=mask)

    netmask = get_dotted_mask(int(mask))
    return '.'.join(get_network_start(address, netmask))
+
+
def normalize_prefix(module, prefix):
    """Return *prefix* in canonical network/len form.

    A bare host address gets an implicit /32; a prefixed address is
    normalized to its network base address. More than one '/' fails.
    """
    parts = prefix.split('/')
    address = parts[0]

    if len(parts) > 2:
        module.fail_json(msg='Incorrect address format.', address=address)

    if len(parts) == 2:
        mask = parts[1]
        network = network_from_string(address, mask, module)
        return '{0}/{1}'.format(network, mask)

    return prefix + '/32'
+
+
def main():
    """Entry point: ensure presence/absence of a static route."""
    argument_spec = dict(
        prefix=dict(required=True, type='str'),
        next_hop=dict(required=True, type='str'),
        vrf=dict(type='str', default='default'),
        tag=dict(type='str'),
        route_name=dict(type='str'),
        pref=dict(type='str'),
        state=dict(choices=['absent', 'present'],
                   default='present'),
        include_defaults=dict(default=True),

        config=dict(),
        save=dict(type='bool', default=False)
    )

    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    state = module.params['state']

    result = dict(changed=False)
    warnings = list()
    prefix = invoke('normalize_prefix', module, module.params['prefix'])

    existing = invoke('get_existing', module, prefix, warnings)
    end_state = existing

    args = ['route_name', 'vrf', 'pref', 'tag', 'next_hop', 'prefix']
    # items() instead of iteritems() keeps this Python 3 compatible.
    proposed = dict((k, v) for k, v in module.params.items()
                    if v is not None and k in args)

    if state == 'present' or (state == 'absent' and existing):
        candidate = CustomNetworkConfig(indent=3)
        invoke('state_%s' % state, module, candidate, prefix)

        try:
            response = load_config(module, candidate)
            result.update(response)
        except Exception:
            exc = get_exception()
            module.fail_json(msg=str(exc))
    else:
        # Nothing to remove: report an empty change set.
        result['updates'] = []

    result['warnings'] = warnings

    if module._verbosity > 0:
        end_state = invoke('get_existing', module, prefix, warnings)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed

    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/network/nxos/nxos_switchport.py b/network/nxos/nxos_switchport.py
index 1f7f730a0d7..f0e06163ce1 100644
--- a/network/nxos/nxos_switchport.py
+++ b/network/nxos/nxos_switchport.py
@@ -16,11 +16,15 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: nxos_switchport
version_added: "2.1"
-short_description: Manages Layer 2 switchport interfaces
+short_description: Manages Layer 2 switchport interfaces.
extends_documentation_fragment: nxos
description:
- Manages Layer 2 interfaces
@@ -28,12 +32,12 @@
notes:
- When C(state=absent), VLANs can be added/removed from trunk links and
the existing access VLAN can be 'unconfigured' to just having VLAN 1
- on that interface
+ on that interface.
- When working with trunks VLANs the keywords add/remove are always sent
in the `switchport trunk allowed vlan` command. Use verbose mode to see
commands sent.
- When C(state=unconfigured), the interface will result with having a default
- Layer 2 interface, i.e. vlan 1 in access mode
+ Layer 2 interface, i.e. vlan 1 in access mode.
options:
interface:
description:
@@ -77,28 +81,47 @@
required: false
version_added: 2.2
default: null
-
'''
EXAMPLES = '''
-# ENSURE Eth1/5 is in its default switchport state
-- nxos_switchport: interface=eth1/5 state=unconfigured host={{ inventory_hostname }}
-
-# ENSURE Eth1/5 is configured for access vlan 20
-- nxos_switchport: interface=eth1/5 mode=access access_vlan=20 host={{ inventory_hostname }}
-
-# ENSURE Eth1/5 only has vlans 5-10 as trunk vlans
-- nxos_switchport: interface=eth1/5 mode=trunk native_vlan=10 trunk_vlans=5-10 host={{ inventory_hostname }}
-
-# Ensure eth1/5 is a trunk port and ensure 2-50 are being tagged (doesn't mean others aren't also being tagged)
-- nxos_switchport: interface=eth1/5 mode=trunk native_vlan=10 trunk_vlans=2-50 host={{ inventory_hostname }}
-
-# Ensure these VLANs are not being tagged on the trunk
-- nxos_switchport: interface=eth1/5 mode=trunk trunk_vlans=51-4094 host={{ inventory_hostname }} state=absent
-
+- name: Ensure Eth1/5 is in its default switchport state
+ nxos_switchport:
+ interface: eth1/5
+ state: unconfigured
+ host: "{{ inventory_hostname }}"
+
+- name: Ensure Eth1/5 is configured for access vlan 20
+ nxos_switchport:
+ interface: eth1/5
+ mode: access
+ access_vlan: 20
+ host: "{{ inventory_hostname }}"
+
+- name: Ensure Eth1/5 only has vlans 5-10 as trunk vlans
+ nxos_switchport:
+ interface: eth1/5
+ mode: trunk
+ native_vlan: 10
+ trunk_vlans: 5-10
+ host: "{{ inventory_hostname }}"
+
+- name: Ensure eth1/5 is a trunk port and ensure 2-50 are being tagged (doesn't mean others aren't also being tagged)
+ nxos_switchport:
+ interface: eth1/5
+ mode: trunk
+ native_vlan: 10
+ trunk_vlans: 2-50
+ host: "{{ inventory_hostname }}"
+
+- name: Ensure these VLANs are not being tagged on the trunk
+ nxos_switchport:
+ interface: eth1/5
+ mode: trunk
+ trunk_vlans: 51-4094
+    host: "{{ inventory_hostname }}"
+ state: absent
'''
RETURN = '''
-
proposed:
description: k/v pairs of parameters passed into module
returned: always
@@ -119,11 +142,6 @@
"interface": "Ethernet1/5", "mode": "access",
"native_vlan": "1", "native_vlan_name": "default",
"switchport": "Enabled", "trunk_vlans": "1-4094"}
-state:
- description: state as sent in from the playbook
- returned: always
- type: string
- sample: "present"
updates:
description: command string sent to the device
returned: always
@@ -134,21 +152,172 @@
returned: always
type: boolean
sample: true
-
'''
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
def get_interface_type(interface):
"""Gets the type of interface
-
Args:
interface (str): full name of interface, i.e. Ethernet1/1, loopback10,
port-channel20, vlan20
-
Returns:
type of interface: ethernet, svi, loopback, management, portchannel,
or unknown
-
"""
if interface.upper().startswith('ET'):
return 'ethernet'
@@ -168,16 +337,13 @@ def get_interface_type(interface):
def get_interface_mode(interface, module):
"""Gets current mode of interface: layer2 or layer3
-
Args:
device (Device): This is the device object of an NX-API enabled device
using the Device class within device.py
interface (string): full name of interface, i.e. Ethernet1/1,
loopback10, port-channel20, vlan20
-
Returns:
str: 'layer2' or 'layer3'
-
"""
command = 'show interface ' + interface
intf_type = get_interface_type(interface)
@@ -205,13 +371,10 @@ def get_interface_mode(interface, module):
def interface_is_portchannel(interface, module):
"""Checks to see if an interface is part of portchannel bundle
-
Args:
interface (str): full name of interface, i.e. Ethernet1/1
-
Returns:
True/False based on if interface is a member of a portchannel bundle
-
"""
intf_type = get_interface_type(interface)
if intf_type == 'ethernet':
@@ -234,15 +397,12 @@ def interface_is_portchannel(interface, module):
def get_switchport(port, module):
"""Gets current config of L2 switchport
-
Args:
device (Device): This is the device object of an NX-API enabled device
using the Device class within device.py
port (str): full name of interface, i.e. Ethernet1/1
-
Returns:
dictionary with k/v pairs for L2 vlan config
-
"""
command = 'show interface {0} switchport'.format(port)
@@ -356,14 +516,11 @@ def get_switchport_config_commands(interface, existing, proposed, module):
def is_switchport_default(existing):
"""Determines if switchport has a default config based on mode
-
Args:
- existing (dict): existing switcport configuration from Ansible mod
-
+ existing (dict): existing switchport configuration from Ansible mod
Returns:
boolean: True if switchport has OOB Layer 2 config, i.e.
vlan 1 and trunk all and mode is access
-
"""
c1 = existing['access_vlan'] == '1'
@@ -455,6 +612,15 @@ def execute_config_command(commands, module):
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
def get_cli_body_ssh(command, response, module):
@@ -462,10 +628,13 @@ def get_cli_body_ssh(command, response, module):
needed because these modules were originally written for NX-API. And
not every command supports "| json" when using cli/ssh. As such, we assume
if | json returns an XML string, it is a valid command, but that the
- resource doesn't exist yet.
+ resource doesn't exist yet. Instead, the output will be a raw string
+ when issuing commands containing 'show run'.
"""
- if 'xml' in response[0]:
+ if 'xml' in response[0] or response[0] == '\n':
body = []
+ elif 'status' in command:
+ body = response
else:
try:
body = [json.loads(response[0])]
@@ -476,6 +645,11 @@ def get_cli_body_ssh(command, response, module):
def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
@@ -483,15 +657,28 @@ def execute_show(cmds, module, command_type=None):
response = module.execute(cmds)
except ShellError:
clie = get_exception()
- module.fail_json(msg='Error sending {0}'.format(command),
+ module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
return response
def execute_show_command(command, module, command_type='cli_show'):
-
if module.params['transport'] == 'cli':
- command += ' | json'
+ if 'status' not in command:
+ command += ' | json'
cmds = [command]
response = execute_show(cmds, module)
body = get_cli_body_ssh(command, response, module)
@@ -524,11 +711,11 @@ def main():
state=dict(choices=['absent', 'present', 'unconfigured'],
default='present')
)
- module = get_module(argument_spec=argument_spec,
- mutually_exclusive=[['access_vlan', 'trunk_vlans'],
- ['access_vlan', 'native_vlan'],
- ['access_vlan', 'trunk_allowed_vlans']],
- supports_check_mode=True)
+ module = get_network_module(argument_spec=argument_spec,
+ mutually_exclusive=[['access_vlan', 'trunk_vlans'],
+ ['access_vlan', 'native_vlan'],
+ ['access_vlan', 'trunk_allowed_vlans']],
+ supports_check_mode=True)
interface = module.params['interface']
mode = module.params['mode']
@@ -632,22 +819,17 @@ def main():
changed = True
execute_config_command(cmds, module)
end_state = get_switchport(interface, module)
+ if 'configure' in cmds:
+ cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
- results['state'] = state
results['updates'] = cmds
results['changed'] = changed
module.exit_json(**results)
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.netcfg import *
-from ansible.module_utils.nxos import *
-
if __name__ == '__main__':
- main()
\ No newline at end of file
+ main()
diff --git a/network/nxos/nxos_udld.py b/network/nxos/nxos_udld.py
new file mode 100644
index 00000000000..8318f4025be
--- /dev/null
+++ b/network/nxos/nxos_udld.py
@@ -0,0 +1,505 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+
+module: nxos_udld
+version_added: "2.2"
+short_description: Manages UDLD global configuration params.
+description:
+ - Manages UDLD global configuration params.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+notes:
+    - When C(state=absent), it unconfigures existing settings C(msg_time) and sets it
+ to its default value of 15. It is cleaner to always use C(state=present).
+ - Module will fail if the udld feature has not been previously enabled.
+options:
+ aggressive:
+ description:
+ - Toggles aggressive mode.
+ required: false
+ default: null
+ choices: ['enabled','disabled']
+ msg_time:
+ description:
+ - Message time in seconds for UDLD packets.
+ required: false
+ default: null
+ reset:
+ description:
+ - Ability to reset UDLD down interfaces.
+ required: false
+ default: null
+ choices: ['true','false']
+ state:
+ description:
+ - Manage the state of the resource.
+ required: false
+ default: present
+ choices: ['present','absent']
+
+'''
+EXAMPLES = '''
+# ensure udld aggressive mode is globally disabled and set the global message interval to 20
+- nxos_udld:
+ aggressive: disabled
+ msg_time: 20
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# Ensure agg mode is globally enabled and msg time is 15
+- nxos_udld:
+ aggressive: enabled
+ msg_time: 15
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"aggressive": "enabled", "msg_time": "40"}
+existing:
+ description:
+ - k/v pairs of existing udld configuration
+ type: dict
+ sample: {"aggressive": "disabled", "msg_time": "15"}
+end_state:
+ description: k/v pairs of udld configuration after module execution
+ returned: always
+ type: dict
+ sample: {"aggressive": "enabled", "msg_time": "40"}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["udld message-time 40", "udld aggressive"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh(command, response, module):
+ """Get response for when transport=cli. This is kind of a hack and mainly
+ needed because these modules were originally written for NX-API. And
+ not every command supports "| json" when using cli/ssh. As such, we assume
+ if | json returns an XML string, it is a valid command, but that the
+ resource doesn't exist yet. Instead, the output will be a raw string
+ when issuing commands containing 'show run'.
+ """
+ if 'xml' in response[0] or response[0] == '\n':
+ body = []
+ elif 'show run' in command:
+ body = response
+ else:
+ try:
+ body = [json.loads(response[0])]
+ except ValueError:
+ module.fail_json(msg='Command does not support JSON output',
+ command=command)
+ return body
+
+
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+ if module.params['transport'] == 'cli':
+ if 'show run' not in command:
+ command += ' | json'
+ cmds = [command]
+ response = execute_show(cmds, module)
+ body = get_cli_body_ssh(command, response, module)
+ elif module.params['transport'] == 'nxapi':
+ cmds = [command]
+ body = execute_show(cmds, module, command_type=command_type)
+
+ return body
+
+
+def flatten_list(command_lists):
+ flat_command_list = []
+ for command in command_lists:
+ if isinstance(command, list):
+ flat_command_list.extend(command)
+ else:
+ flat_command_list.append(command)
+ return flat_command_list
+
+
+def apply_key_map(key_map, table):
+ new_dict = {}
+ for key, value in table.items():
+ new_key = key_map.get(key)
+ if new_key:
+ value = table.get(key)
+ if value:
+ new_dict[new_key] = str(value)
+ else:
+ new_dict[new_key] = value
+ return new_dict
+
+
+
+def get_commands_config_udld_global(delta, reset):
+ config_args = {
+ 'enabled': 'udld aggressive',
+ 'disabled': 'no udld aggressive',
+ 'msg_time': 'udld message-time {msg_time}'
+ }
+ commands = []
+ for param, value in delta.iteritems():
+ if param == 'aggressive':
+ if value == 'enabled':
+ command = 'udld aggressive'
+ elif value == 'disabled':
+ command = 'no udld aggressive'
+ else:
+ command = config_args.get(param, 'DNE').format(**delta)
+ if command and command != 'DNE':
+ commands.append(command)
+ command = None
+
+ if reset:
+ command = 'udld reset'
+ commands.append(command)
+ return commands
+
+
+def get_commands_remove_udld_global(delta):
+ config_args = {
+ 'aggressive': 'no udld aggressive',
+ 'msg_time': 'no udld message-time {msg_time}',
+ }
+ commands = []
+ for param, value in delta.iteritems():
+ command = config_args.get(param, 'DNE').format(**delta)
+ if command and command != 'DNE':
+ commands.append(command)
+ command = None
+ return commands
+
+
+def get_udld_global(module):
+ command = 'show udld global'
+ udld_table = execute_show_command(command, module)[0]
+
+ status = str(udld_table.get('udld-global-mode', None))
+ if status == 'enabled-aggressive':
+ aggressive = 'enabled'
+ else:
+ aggressive = 'disabled'
+
+ interval = str(udld_table.get('message-interval', None))
+ udld = dict(msg_time=interval, aggressive=aggressive)
+
+ return udld
+
+
+def main():
+ argument_spec = dict(
+ aggressive=dict(required=False, choices=['enabled', 'disabled']),
+ msg_time=dict(required=False, type='str'),
+ reset=dict(required=False, type='bool'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ required_one_of=[['aggressive', 'msg_time', 'reset']],
+ supports_check_mode=True)
+
+ aggressive = module.params['aggressive']
+ msg_time = module.params['msg_time']
+ reset = module.params['reset']
+ state = module.params['state']
+
+ if (aggressive or reset) and state == 'absent':
+ module.fail_json(msg="It's better to use state=present when "
+ "configuring or unconfiguring aggressive mode "
+ "or using reset flag. state=absent is just for "
+ "when using msg_time param.")
+
+ if msg_time:
+ try:
+ msg_time_int = int(msg_time)
+ if msg_time_int < 7 or msg_time_int > 90:
+ raise ValueError
+ except ValueError:
+ module.fail_json(msg='msg_time must be an integer'
+                ' between 7 and 90')
+
+ args = dict(aggressive=aggressive, msg_time=msg_time, reset=reset)
+ proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
+
+ existing = get_udld_global(module)
+ end_state = existing
+
+ delta = set(proposed.iteritems()).difference(existing.iteritems())
+ changed = False
+
+ commands = []
+ if state == 'present':
+ if delta:
+ command = get_commands_config_udld_global(dict(delta), reset)
+ commands.append(command)
+
+ elif state == 'absent':
+ common = set(proposed.iteritems()).intersection(existing.iteritems())
+ if common:
+ command = get_commands_remove_udld_global(dict(common))
+ commands.append(command)
+
+ cmds = flatten_list(commands)
+ if cmds:
+ if module.check_mode:
+ module.exit_json(changed=True, commands=cmds)
+ else:
+ changed = True
+ execute_config_command(cmds, module)
+ end_state = get_udld_global(module)
+ if 'configure' in cmds:
+ cmds.pop(0)
+
+ results = {}
+ results['proposed'] = proposed
+ results['existing'] = existing
+ results['end_state'] = end_state
+ results['updates'] = cmds
+ results['changed'] = changed
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_udld_interface.py b/network/nxos/nxos_udld_interface.py
new file mode 100644
index 00000000000..f73670c2b1a
--- /dev/null
+++ b/network/nxos/nxos_udld_interface.py
@@ -0,0 +1,518 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_udld_interface
+version_added: "2.2"
+short_description: Manages UDLD interface configuration params.
+description:
+ - Manages UDLD interface configuration params.
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+notes:
+ - Feature UDLD must be enabled on the device to use this module.
+options:
+ mode:
+ description:
+ - Manages UDLD mode for an interface.
+ required: true
+ choices: ['enabled','disabled','aggressive']
+ interface:
+ description:
+            - FULL name of the interface, i.e. Ethernet1/1.
+ required: true
+ state:
+ description:
+ - Manage the state of the resource.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+EXAMPLES = '''
+# ensure Ethernet1/1 is configured to be in aggressive mode
+- nxos_udld_interface:
+ interface: Ethernet1/1
+ mode: aggressive
+ state: present
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# Remove the aggressive config only if it's currently in aggressive mode and then disable udld (switch default)
+- nxos_udld_interface:
+ interface: Ethernet1/1
+ mode: aggressive
+ state: absent
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# ensure Ethernet1/1 has aggressive mode enabled
+- nxos_udld_interface:
+ interface: Ethernet1/1
+ mode: enabled
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"mode": "enabled"}
+existing:
+ description:
+ - k/v pairs of existing configuration
+ type: dict
+ sample: {"mode": "aggressive"}
+end_state:
+ description: k/v pairs of configuration after module execution
+ returned: always
+ type: dict
+ sample: {"mode": "enabled"}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["interface ethernet1/33",
+ "no udld aggressive ; no udld disable"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh(command, response, module):
+ """Get response for when transport=cli. This is kind of a hack and mainly
+ needed because these modules were originally written for NX-API. And
+ not every command supports "| json" when using cli/ssh. As such, we assume
+ if | json returns an XML string, it is a valid command, but that the
+ resource doesn't exist yet. Instead, the output will be a raw string
+ when issuing commands containing 'show run'.
+ """
+ if 'xml' in response[0] or response[0] == '\n':
+ body = []
+ elif 'show run' in command:
+ body = response
+ else:
+ try:
+ body = [json.loads(response[0])]
+ except ValueError:
+ module.fail_json(msg='Command does not support JSON output',
+ command=command)
+ return body
+
+
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+ if module.params['transport'] == 'cli':
+ if 'show run' not in command:
+ command += ' | json'
+ cmds = [command]
+ response = execute_show(cmds, module)
+ body = get_cli_body_ssh(command, response, module)
+ elif module.params['transport'] == 'nxapi':
+ cmds = [command]
+ body = execute_show(cmds, module, command_type=command_type)
+
+ return body
+
+
+def flatten_list(command_lists):
+ flat_command_list = []
+ for command in command_lists:
+ if isinstance(command, list):
+ flat_command_list.extend(command)
+ else:
+ flat_command_list.append(command)
+ return flat_command_list
+
+
+def get_udld_interface(module, interface):
+ command = 'show udld {0}'.format(interface)
+ interface_udld = {}
+ mode = None
+ try:
+ body = execute_show_command(command, module)[0]
+ table = body['TABLE_interface']['ROW_interface']
+
+ status = str(table.get('mib-port-status', None))
+ # Note: 'mib-aggresive-mode' is NOT a typo
+ agg = str(table.get('mib-aggresive-mode', 'disabled'))
+
+ if agg == 'enabled':
+ mode = 'aggressive'
+ else:
+ mode = status
+
+ interface_udld['mode'] = mode
+
+ except (KeyError, AttributeError, IndexError):
+ interface_udld = {}
+
+ return interface_udld
+
+
+def is_interface_copper(module, interface):
+ command = 'show interface status'
+ copper = []
+ try:
+ body = execute_show_command(command, module)[0]
+ table = body['TABLE_interface']['ROW_interface']
+ for each in table:
+ itype = each.get('type', 'DNE')
+ if 'CU' in itype or '1000' in itype or '10GBaseT' in itype:
+ copper.append(str(each['interface'].lower()))
+ except (KeyError, AttributeError):
+ pass
+
+ if interface in copper:
+ found = True
+ else:
+ found = False
+
+ return found
+
+
+def get_commands_config_udld_interface(delta, interface, module, existing):
+ commands = []
+ copper = is_interface_copper(module, interface)
+ if delta:
+ mode = delta['mode']
+ if mode == 'aggressive':
+ command = 'udld aggressive'
+ elif copper:
+ if mode == 'enabled':
+ if existing['mode'] == 'aggressive':
+ command = 'no udld aggressive ; udld enable'
+ else:
+ command = 'udld enable'
+ elif mode == 'disabled':
+ command = 'no udld enable'
+ elif not copper:
+ if mode == 'enabled':
+ if existing['mode'] == 'aggressive':
+ command = 'no udld aggressive ; no udld disable'
+ else:
+ command = 'no udld disable'
+ elif mode == 'disabled':
+ command = 'udld disable'
+ if command:
+ commands.append(command)
+ commands.insert(0, 'interface {0}'.format(interface))
+
+ return commands
+
+
+def get_commands_remove_udld_interface(delta, interface, module, existing):
+ commands = []
+ copper = is_interface_copper(module, interface)
+
+ if delta:
+ mode = delta['mode']
+ if mode == 'aggressive':
+ command = 'no udld aggressive'
+ elif copper:
+ if mode == 'enabled':
+ command = 'no udld enable'
+ elif mode == 'disabled':
+ command = 'udld enable'
+ elif not copper:
+ if mode == 'enabled':
+ command = 'udld disable'
+ elif mode == 'disabled':
+ command = 'no udld disable'
+ if command:
+ commands.append(command)
+ commands.insert(0, 'interface {0}'.format(interface))
+
+ return commands
+
+
+def main():
+ argument_spec = dict(
+ mode=dict(choices=['enabled', 'disabled', 'aggressive'],
+ required=True),
+ interface=dict(type='str', required=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ interface = module.params['interface'].lower()
+ mode = module.params['mode']
+ state = module.params['state']
+
+ proposed = dict(mode=mode)
+ existing = get_udld_interface(module, interface)
+ end_state = existing
+
+ delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
+
+ changed = False
+ commands = []
+ if state == 'present':
+ if delta:
+ command = get_commands_config_udld_interface(delta, interface,
+ module, existing)
+ commands.append(command)
+ elif state == 'absent':
+ common = set(proposed.iteritems()).intersection(existing.iteritems())
+ if common:
+ command = get_commands_remove_udld_interface(
+ dict(common), interface, module, existing
+ )
+ commands.append(command)
+
+ cmds = flatten_list(commands)
+ if cmds:
+ if module.check_mode:
+ module.exit_json(changed=True, commands=cmds)
+ else:
+ changed = True
+ execute_config_command(cmds, module)
+ end_state = get_udld_interface(module, interface)
+ if 'configure' in cmds:
+ cmds.pop(0)
+
+ results = {}
+ results['proposed'] = proposed
+ results['existing'] = existing
+ results['end_state'] = end_state
+ results['updates'] = cmds
+ results['changed'] = changed
+
+ module.exit_json(**results)
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_vlan.py b/network/nxos/nxos_vlan.py
index 0c7db5d26e5..67719262bd0 100644
--- a/network/nxos/nxos_vlan.py
+++ b/network/nxos/nxos_vlan.py
@@ -16,13 +16,17 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: nxos_vlan
version_added: "2.1"
-short_description: Manages VLAN resources and attributes
+short_description: Manages VLAN resources and attributes.
description:
- - Manages VLAN configurations on NX-OS switches
+ - Manages VLAN configurations on NX-OS switches.
author: Jason Edelman (@jedelman8)
extends_documentation_fragment: nxos
options:
@@ -33,28 +37,35 @@
default: null
vlan_range:
description:
- - A range of VLANs such as I(2-10) or I(2,5,10-15) etc.
+ - Range of VLANs such as 2-10 or 2,5,10-15, etc.
required: false
default: null
name:
description:
- - The name of VLAN.
+ - Name of VLAN.
required: false
default: null
vlan_state:
description:
- - Manage the VLAN operational state of the VLAN
+ - Manage the vlan operational state of the VLAN
(equivalent to state {active | suspend} command.
required: false
default: active
choices: ['active','suspend']
admin_state:
description:
- - Manage the VLAN admin state of the VLAN equivalent
+ - Manage the VLAN administrative state of the VLAN equivalent
to shut/no shut in VLAN config mode.
required: false
default: up
choices: ['up','down']
+ mapped_vni:
+ description:
+ - The Virtual Network Identifier (VNI) ID that is mapped to the
+ VLAN. Valid values are integer and keyword 'default'.
+ required: false
+ default: null
+ version_added: "2.2"
state:
description:
- Manage the state of the resource.
@@ -64,16 +75,33 @@
'''
EXAMPLES = '''
-# Ensure a range of VLANs are not present on the switch
-- nxos_vlan: vlan_range="2-10,20,50,55-60,100-150" host={{ inventory_hostname }} username=cisco password=cisco state=absent transport=nxapi
-
-# Ensure VLAN 50 exists with the name WEB and is in the shutdown state
-- nxos_vlan: vlan_id=50 host={{ inventory_hostname }} admin_state=down name=WEB transport=nxapi username=cisco password=cisco
-
-# Ensure VLAN is NOT on the device
-- nxos_vlan: vlan_id=50 host={{ inventory_hostname }} state=absent transport=nxapi username=cisco password=cisco
-
-
+- name: Ensure a range of VLANs are not present on the switch
+ nxos_vlan:
+ vlan_range: "2-10,20,50,55-60,100-150"
+ host: 68.170.147.165
+ username: cisco
+ password: cisco
+ state: absent
+ transport: nxapi
+
+- name: Ensure VLAN 50 exists with the name WEB and is in the shutdown state
+ nxos_vlan:
+ vlan_id: 50
+ host: 68.170.147.165
+ admin_state: down
+ name: WEB
+ transport: nxapi
+ username: cisco
+ password: cisco
+
+- name: Ensure VLAN is NOT on the device
+ nxos_vlan:
+ vlan_id: 50
+ host: 68.170.147.165
+ state: absent
+ transport: nxapi
+ username: cisco
+ password: cisco
'''
RETURN = '''
@@ -99,30 +127,25 @@
returned: always
type: dict or null
sample: {"admin_state": "down", "name": "app_vlan",
- "vlan_state": "suspend"}
+ "vlan_state": "suspend", "mapped_vni": "5000"}
existing:
description: k/v pairs of existing vlan or null when using vlan_range
returned: always
type: dict
sample: {"admin_state": "down", "name": "app_vlan",
- "vlan_id": "20", "vlan_state": "suspend"}
+ "vlan_id": "20", "vlan_state": "suspend", "mapped_vni": ""}
end_state:
description: k/v pairs of the VLAN after executing module or null
when using vlan_range
returned: always
type: dict or null
sample: {"admin_state": "down", "name": "app_vlan", "vlan_id": "20",
- "vlan_state": "suspend"}
-state:
- description: state as sent in from the playbook
- returned: always
- type: string
- sample: "present"
+ "vlan_state": "suspend", "mapped_vni": "5000"}
updates:
description: command string sent to the device
returned: always
type: list
- sample: ["vlan 20", "vlan 55"]
+ sample: ["vlan 20", "vlan 55", "vn-segment 5000"]
changed:
description: check to see if a change was made on the device
returned: always
@@ -131,6 +154,162 @@
'''
+import json
+import collections
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
def vlan_range_to_list(vlans):
result = []
@@ -196,13 +375,17 @@ def get_vlan_config_commands(vlan, vid):
'name': 'name {0}',
'vlan_state': 'state {0}',
'admin_state': '{0}',
- 'mode': 'mode {0}'
+ 'mode': 'mode {0}',
+ 'mapped_vni': 'vn-segment {0}'
}
commands = []
for param, value in vlan.iteritems():
- command = VLAN_ARGS.get(param).format(vlan.get(param))
+ if param == 'mapped_vni' and value == 'default':
+ command = 'no vn-segment'
+ else:
+ command = VLAN_ARGS.get(param).format(vlan.get(param))
if command:
commands.append(command)
@@ -213,7 +396,6 @@ def get_vlan_config_commands(vlan, vid):
def get_list_of_vlans(module):
-
command = 'show vlan'
body = execute_show_command(command, module)
vlan_list = []
@@ -228,6 +410,17 @@ def get_list_of_vlans(module):
return vlan_list
+def get_vni(vlanid, module):
+ command = 'show run all | section vlan.{0}'.format(vlanid)
+ body = execute_show_command(command, module, command_type='cli_show_ascii')[0]
+ value = ''
+ if body:
+ REGEX = re.compile(r'(?:vn-segment\s)(?P.*)$', re.M)
+ if 'vn-segment' in body:
+ value = REGEX.search(body).group('value')
+ return value
+
+
def get_vlan(vlanid, module):
"""Get instance of VLAN as a dictionary
"""
@@ -258,7 +451,7 @@ def get_vlan(vlanid, module):
}
vlan = apply_value_map(value_map, vlan)
-
+ vlan['mapped_vni'] = get_vni(vlanid, module)
return vlan
@@ -284,6 +477,15 @@ def execute_config_command(commands, module):
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
def get_cli_body_ssh(command, response, module):
@@ -293,7 +495,9 @@ def get_cli_body_ssh(command, response, module):
if | json returns an XML string, it is a valid command, but that the
resource doesn't exist yet.
"""
- if 'xml' in response[0]:
+ if 'show run' in command or response[0] == '\n':
+ body = response
+ elif 'xml' in response[0]:
body = []
else:
try:
@@ -305,6 +509,11 @@ def get_cli_body_ssh(command, response, module):
def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
@@ -312,15 +521,28 @@ def execute_show(cmds, module, command_type=None):
response = module.execute(cmds)
except ShellError:
clie = get_exception()
- module.fail_json(msg='Error sending {0}'.format(command),
+ module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
return response
def execute_show_command(command, module, command_type='cli_show'):
-
if module.params['transport'] == 'cli':
- command += ' | json'
+ if 'show run' not in command:
+ command += ' | json'
cmds = [command]
response = execute_show(cmds, module)
body = get_cli_body_ssh(command, response, module)
@@ -337,20 +559,25 @@ def main():
vlan_range=dict(required=False),
name=dict(required=False),
vlan_state=dict(choices=['active', 'suspend'], required=False),
+ mapped_vni=dict(required=False, type='str'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
admin_state=dict(choices=['up', 'down'], required=False),
+ include_defaults=dict(default=False),
+ config=dict(),
+ save=dict(type='bool', default=False)
)
- module = get_module(argument_spec=argument_spec,
- mutually_exclusive=[['vlan_range', 'name'],
- ['vlan_id', 'vlan_range']],
- supports_check_mode=True)
+ module = get_network_module(argument_spec=argument_spec,
+ mutually_exclusive=[['vlan_range', 'name'],
+ ['vlan_id', 'vlan_range']],
+ supports_check_mode=True)
vlan_range = module.params['vlan_range']
vlan_id = module.params['vlan_id']
name = module.params['name']
vlan_state = module.params['vlan_state']
admin_state = module.params['admin_state']
+ mapped_vni = module.params['mapped_vni']
state = module.params['state']
changed = False
@@ -360,14 +587,13 @@ def main():
module.fail_json(msg='vlan_id must be a valid VLAN ID')
args = dict(name=name, vlan_state=vlan_state,
- admin_state=admin_state)
+ admin_state=admin_state, mapped_vni=mapped_vni)
proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
proposed_vlans_list = numerical_sort(vlan_range_to_list(
vlan_id or vlan_range))
existing_vlans_list = numerical_sort(get_list_of_vlans(module))
-
commands = []
existing = None
@@ -390,6 +616,9 @@ def main():
if existing:
commands = ['no vlan ' + vlan_id]
elif state == 'present':
+ if (existing.get('mapped_vni') == '0' and
+ proposed.get('mapped_vni') == 'default'):
+ proposed.pop('mapped_vni')
delta = dict(set(
proposed.iteritems()).difference(existing.iteritems()))
if delta or not existing:
@@ -399,6 +628,10 @@ def main():
end_state_vlans_list = existing_vlans_list
if commands:
+ if existing.get('mapped_vni'):
+ if (existing.get('mapped_vni') != proposed.get('mapped_vni') and
+ existing.get('mapped_vni') != '0' and proposed.get('mapped_vni') != 'default'):
+ commands.insert(1, 'no vn-segment')
if module.check_mode:
module.exit_json(changed=True,
commands=commands)
@@ -406,6 +639,8 @@ def main():
execute_config_command(commands, module)
changed = True
end_state_vlans_list = numerical_sort(get_list_of_vlans(module))
+ if 'configure' in commands:
+ commands.pop(0)
if vlan_id:
end_state = get_vlan(vlan_id, module)
@@ -416,17 +651,11 @@ def main():
results['existing'] = existing
results['end_state'] = end_state
results['end_state_vlans_list'] = end_state_vlans_list
- results['state'] = state
results['updates'] = commands
results['changed'] = changed
module.exit_json(**results)
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.netcfg import *
-from ansible.module_utils.nxos import *
if __name__ == '__main__':
main()
diff --git a/network/nxos/nxos_vpc.py b/network/nxos/nxos_vpc.py
new file mode 100644
index 00000000000..42af1cb6511
--- /dev/null
+++ b/network/nxos/nxos_vpc.py
@@ -0,0 +1,659 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_vpc
+version_added: "2.2"
+short_description: Manages global VPC configuration
+description:
+ - Manages global VPC configuration
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - The feature vpc must be enabled before this module can be used
+ - If not using management vrf, vrf must be globally on the device
+ before using in the pkl config
+ - Although source IP isn't required on the command line it is
+ required when using this module. The PKL VRF must also be configured
+ prior to using this module.
+ - Both pkl_src and pkl_dest are needed when changing PKL VRF.
+options:
+ domain:
+ description:
+ - VPC domain
+ required: true
+ role_priority:
+ description:
+ - Role priority for device. Remember lower is better.
+ required: false
+ default: null
+ system_priority:
+ description:
+ - System priority device. Remember they must match between peers.
+ required: false
+ default: null
+ pkl_src:
+ description:
+ - Source IP address used for peer keepalive link
+ required: false
+ default: null
+ pkl_dest:
+ description:
+ - Destination (remote) IP address used for peer keepalive link
+ required: false
+ default: null
+ pkl_vrf:
+ description:
+ - VRF used for peer keepalive link
+ required: false
+ default: management
+ peer_gw:
+ description:
+ - Enables/Disables peer gateway
+ required: true
+ choices: ['true','false']
+ auto_recovery:
+ description:
+ - Enables/Disables auto recovery
+ required: true
+ choices: ['true','false']
+ delay_restore:
+ description:
+ - manages delay restore command and config value in seconds
+ required: false
+ default: null
+ state:
+ description:
+ - Manages desired state of the resource
+ required: true
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+# configure a simple asn
+- nxos_vpc:
+ domain: 100
+ role_priority: 1000
+ system_priority: 2000
+ pkl_dest: 192.168.100.4
+ pkl_src: 10.1.100.20
+ peer_gw: true
+ auto_recovery: true
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"auto_recovery": true, "domain": "100",
+ "peer_gw": true, "pkl_dest": "192.168.100.4",
+ "pkl_src": "10.1.100.20", "pkl_vrf": "management",
+ "role_priority": "1000", "system_priority": "2000"}
+existing:
+ description: k/v pairs of existing VPC configuration
+ type: dict
+ sample: {"auto_recovery": true, "delay_restore": null,
+ "domain": "100", "peer_gw": true,
+ "pkl_dest": "192.168.100.2", "pkl_src": "10.1.100.20",
+ "pkl_vrf": "management", "role_priority": "1000",
+ "system_priority": "2000"}
+end_state:
+ description: k/v pairs of VPC configuration after module execution
+ returned: always
+ type: dict
+ sample: {"auto_recovery": true, "domain": "100",
+ "peer_gw": true, "pkl_dest": "192.168.100.4",
+ "pkl_src": "10.1.100.20", "pkl_vrf": "management",
+ "role_priority": "1000", "system_priority": "2000"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["vpc domain 100",
+ "peer-keepalive destination 192.168.100.4 source 10.1.100.20 vrf management",
+ "auto-recovery", "peer-gateway"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import json
+import collections
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh(command, response, module):
+ """Get response for when transport=cli. This is kind of a hack and mainly
+ needed because these modules were originally written for NX-API. And
+ not every command supports "| json" when using cli/ssh.
+ """
+ if '^' == response[0]:
+ body = []
+ elif 'running' in command:
+ body = response
+ else:
+ if command in response[0]:
+ response = [response[0].split(command)[1]]
+ try:
+ body = [json.loads(response[0])]
+ except ValueError:
+ module.fail_json(msg='Command does not support JSON output',
+ command=command)
+ return body
+
+
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+ if module.params['transport'] == 'cli':
+ if "section" not in command:
+ command += ' | json'
+ cmds = [command]
+ response = execute_show(cmds, module)
+ body = get_cli_body_ssh(command, response, module)
+ elif module.params['transport'] == 'nxapi':
+ cmds = [command]
+ body = execute_show(cmds, module, command_type=command_type)
+
+ return body
+
+
+def flatten_list(command_lists):
+ flat_command_list = []
+ for command in command_lists:
+ if isinstance(command, list):
+ flat_command_list.extend(command)
+ else:
+ flat_command_list.append(command)
+ return flat_command_list
+
+
+def get_vrf_list(module):
+ command = 'show vrf all'
+ vrf_table = None
+
+ body = execute_show_command(command, module)
+
+ try:
+ vrf_table = body[0]['TABLE_vrf']['ROW_vrf']
+ except (KeyError, AttributeError):
+ return []
+
+ vrf_list = []
+ if vrf_table:
+ for each in vrf_table:
+ vrf_list.append(str(each['vrf_name'].lower()))
+
+ return vrf_list
+
+
+def get_autorecovery(auto):
+ auto_recovery = auto.split(' ')[0]
+ if 'enabled' in auto_recovery.lower():
+ return True
+ else:
+ return False
+
+
+def get_vpc_running_config(module):
+ command = 'show running section vpc'
+ body = execute_show_command(command, module, command_type='cli_show_ascii')
+
+ return body
+
+
+def get_vpc(module):
+ vpc = {}
+
+ command = 'show vpc'
+ body = execute_show_command(command, module)[0]
+ domain = str(body['vpc-domain-id'])
+ auto_recovery = get_autorecovery(str(
+ body['vpc-auto-recovery-status']))
+
+ if domain != 'not configured':
+ delay_restore = None
+ pkl_src = None
+ role_priority = None
+ system_priority = None
+ pkl_dest = None
+ pkl_vrf = None
+ peer_gw = False
+
+ run = get_vpc_running_config(module)[0]
+ if run:
+ vpc_list = run.split('\n')
+ for each in vpc_list:
+ if 'delay restore' in each:
+ line = each.split()
+ if len(line) == 5:
+ delay_restore = line[-1]
+ if 'peer-keepalive destination' in each:
+ line = each.split()
+ pkl_dest = line[2]
+ for word in line:
+ if 'source' in word:
+ index = line.index(word)
+ pkl_src = line[index + 1]
+ if 'role priority' in each:
+ line = each.split()
+ role_priority = line[-1]
+ if 'system-priority' in each:
+ line = each.split()
+ system_priority = line[-1]
+ if 'peer-gateway' in each:
+ peer_gw = True
+
+
+ command = 'show vpc peer-keepalive'
+ body = execute_show_command(command, module)[0]
+
+ if body:
+ pkl_dest = body['vpc-keepalive-dest']
+ if 'N/A' in pkl_dest:
+ pkl_dest = None
+ elif len(pkl_dest) == 2:
+ pkl_dest = pkl_dest[0]
+ pkl_vrf = str(body['vpc-keepalive-vrf'])
+
+ vpc['domain'] = domain
+ vpc['auto_recovery'] = auto_recovery
+ vpc['delay_restore'] = delay_restore
+ vpc['pkl_src'] = pkl_src
+ vpc['role_priority'] = role_priority
+ vpc['system_priority'] = system_priority
+ vpc['pkl_dest'] = pkl_dest
+ vpc['pkl_vrf'] = pkl_vrf
+ vpc['peer_gw'] = peer_gw
+ else:
+ vpc = {}
+
+ return vpc
+
+
def get_commands_to_config_vpc(module, vpc, domain, existing):
    """Build the CLI command list that configures a vPC domain.

    Args:
        module: unused here; kept so the call signature matches main().
        vpc: proposed settings — a dict, or an iterable of (key, value)
            pairs (main() passes a set of item tuples).
        domain: vPC domain id to enter.
        existing: current device settings; used to fill in missing
            peer-keepalive src/dest/vrf values.

    Returns:
        List of commands, starting with 'vpc domain <id>' when there is
        anything to configure.
    """
    vpc = dict(vpc)

    domain_only = vpc.get('domain')
    pkl_src = vpc.get('pkl_src')
    pkl_dest = vpc.get('pkl_dest')
    pkl_vrf = vpc.get('pkl_vrf') or existing.get('pkl_vrf')
    vpc['pkl_vrf'] = pkl_vrf

    commands = []
    if pkl_src or pkl_dest:
        # Complete a partial keepalive spec from the existing config.
        if pkl_src is None:
            vpc['pkl_src'] = existing.get('pkl_src')
        elif pkl_dest is None:
            vpc['pkl_dest'] = existing.get('pkl_dest')
        pkl_command = ('peer-keepalive destination {pkl_dest}'
                       ' source {pkl_src} vrf {pkl_vrf}'.format(**vpc))
        commands.append(pkl_command)
    elif pkl_vrf:
        # Only the VRF changed: re-issue keepalive with existing endpoints.
        pkl_src = existing.get('pkl_src')
        pkl_dest = existing.get('pkl_dest')
        if pkl_src and pkl_dest:
            pkl_command = ('peer-keepalive destination {0}'
                           ' source {1} vrf {2}'.format(pkl_dest, pkl_src, pkl_vrf))
            commands.append(pkl_command)

    # Booleans map to presence/absence of the 'no' prefix. `is False`
    # (not `== False`) is the idiomatic identity test for bool params.
    if vpc.get('auto_recovery') is False:
        vpc['auto_recovery'] = 'no'
    else:
        vpc['auto_recovery'] = ''

    if vpc.get('peer_gw') is False:
        vpc['peer_gw'] = 'no'
    else:
        vpc['peer_gw'] = ''

    CONFIG_ARGS = {
        'role_priority': 'role priority {role_priority}',
        'system_priority': 'system-priority {system_priority}',
        'delay_restore': 'delay restore {delay_restore}',
        'peer_gw': '{peer_gw} peer-gateway',
        'auto_recovery': '{auto_recovery} auto-recovery',
    }

    # Iterate keys directly: the original used the Python 2-only
    # dict.iteritems() and never used the value.
    for param in vpc:
        command = CONFIG_ARGS.get(param, 'DNE').format(**vpc)
        if command and command != 'DNE':
            commands.append(command.strip())
        command = None

    if commands or domain_only:
        commands.insert(0, 'vpc domain {0}'.format(domain))
    return commands
+
+
def get_commands_to_remove_vpc_interface(portchannel, config_value):
    """Return the command pair that strips a vpc/peer-link setting
    from the given port-channel interface."""
    return [
        'interface port-channel{0}'.format(portchannel),
        'no vpc {0}'.format(config_value),
    ]
+
+
def main():
    """Module entry point: reconcile the device vPC domain config with
    the requested parameters, honoring check mode."""
    argument_spec = dict(
        domain=dict(required=True, type='str'),
        role_priority=dict(required=False, type='str'),
        system_priority=dict(required=False, type='str'),
        pkl_src=dict(required=False),
        pkl_dest=dict(required=False),
        pkl_vrf=dict(required=False, default='management'),
        peer_gw=dict(required=True, type='bool'),
        auto_recovery=dict(required=True, type='bool'),
        delay_restore=dict(required=False, type='str'),
        state=dict(choices=['absent', 'present'], default='present'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    domain = module.params['domain']
    role_priority = module.params['role_priority']
    system_priority = module.params['system_priority']
    pkl_src = module.params['pkl_src']
    pkl_dest = module.params['pkl_dest']
    pkl_vrf = module.params['pkl_vrf']
    peer_gw = module.params['peer_gw']
    auto_recovery = module.params['auto_recovery']
    delay_restore = module.params['delay_restore']
    state = module.params['state']

    args = dict(domain=domain, role_priority=role_priority,
                system_priority=system_priority, pkl_src=pkl_src,
                pkl_dest=pkl_dest, pkl_vrf=pkl_vrf, peer_gw=peer_gw,
                auto_recovery=auto_recovery,
                delay_restore=delay_restore)

    # Peer-keepalive is all-or-nothing: if only source or dest is given,
    # abort; if neither, drop all pkl keys so they don't show in `proposed`.
    if not (pkl_src and pkl_dest and pkl_vrf):
        # if only the source or dest is set, it'll fail and ask to set the
        # other
        if pkl_src or pkl_dest:
            module.fail_json(msg='source AND dest IP for pkl are required at '
                                 'this time (although source is technically not '
                                 ' required by the device.)')

        args.pop('pkl_src')
        args.pop('pkl_dest')
        args.pop('pkl_vrf')

    # The keepalive VRF must already exist on the device.
    if pkl_vrf:
        if pkl_vrf.lower() not in get_vrf_list(module):
            module.fail_json(msg='The VRF you are trying to use for the peer '
                                 'keepalive link is not on device yet. Add it'
                                 ' first, please.')
    # NOTE(review): dict.iteritems() is Python 2-only; .items() would be
    # needed for Python 3 compatibility (here and in the delta below).
    proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
    changed = False
    existing = get_vpc(module)
    end_state = existing

    commands = []
    if state == 'present':
        # Only configure the parameters that differ from the device state.
        delta = set(proposed.iteritems()).difference(existing.iteritems())
        if delta:
            command = get_commands_to_config_vpc(module, delta, domain, existing)
            commands.append(command)
    elif state == 'absent':
        if existing:
            if domain != existing['domain']:
                module.fail_json(msg="You are trying to remove a domain that "
                                     "does not exist on the device")
            else:
                commands.append('no vpc domain {0}'.format(domain))

    cmds = flatten_list(commands)

    if cmds:
        if module.check_mode:
            # Check mode: report what would be sent, change nothing.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            execute_config_command(cmds, module)
            end_state = get_vpc(module)
            # The cli fallback prepends 'configure'; hide it from results.
            if 'configure' in cmds:
                cmds.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed

    module.exit_json(**results)
diff --git a/network/nxos/nxos_vpc_interface.py b/network/nxos/nxos_vpc_interface.py
new file mode 100644
index 00000000000..6f122f6f2d5
--- /dev/null
+++ b/network/nxos/nxos_vpc_interface.py
@@ -0,0 +1,593 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_vpc_interface
+version_added: "2.2"
+short_description: Manages interface VPC configuration
+description:
+ - Manages interface VPC configuration
+extends_documentation_fragment: nxos
+author:
+ - Jason Edelman (@jedelman8)
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - Either vpc or peer_link param is required, but not both.
+ - C(state=absent) removes whatever VPC config is on a port-channel
+ if one exists.
+ - Re-assigning a vpc or peerlink from one portchannel to another is not
+ supported. The module will force the user to unconfigure an existing
+ vpc/pl before configuring the same value on a new portchannel
+options:
+ portchannel:
+ description:
+ - Group number of the portchannel that will be configured.
+ required: true
+ vpc:
+ description:
+ - VPC group/id that will be configured on associated portchannel.
+ required: false
+ default: null
+ peer_link:
+ description:
+ - Set to true/false for peer link config on associated portchannel.
+ required: false
+ default: null
+ state:
+ description:
+ - Manages desired state of the resource.
+ required: true
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+- nxos_vpc_interface:
+ portchannel: 10
+ vpc: 100
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"portchannel": "100", "vpc": "10"}
+existing:
+ description: k/v pairs of existing configuration
+ type: dict
+ sample: {}
+end_state:
+ description: k/v pairs of configuration after module execution
+ returned: always
+ type: dict
+ sample: {"peer-link": false, "portchannel": "100", "vpc": "10"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["interface port-channel100", "vpc 10"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+import collections
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Normalize *val* to a list: lists/tuples are copied, None becomes
    an empty list, anything else is wrapped in a one-element list."""
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig extended with section expansion/lookup helpers used
    by the migration shim below."""

    def expand_section(self, configobj, S=None):
        """Return *configobj* plus all of its descendants, depth-first."""
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        """Find the config line whose text and parent chain match *path*
        (a list of line texts); returns None when absent."""
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        """Join raw config lines of *section* into one text block."""
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        """Return the config section at *path* as text, or [] when the
        path does not exist."""
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        """Return the ConfigLine objects of the section at *path*.

        Raises ValueError when the path is not present in the config.
        """
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more configuration lines, creating any missing
        parent sections along the way."""

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the platform module object, preferring the legacy
    get_module() factory; NameError means it was never imported, so
    fall back to NetworkModule."""
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the running config as a CustomNetworkConfig tree.

    Prefers the user-supplied 'config' parameter; otherwise pulls the
    config from the device via whichever API the module object exposes
    (legacy .get_config() or newer .config.get_config()).
    """
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
+
def load_config(module, candidate):
    """Diff *candidate* against the running config and push the delta.

    Honors check mode (no device writes) and the 'save' parameter
    (copies running-config to startup-config). Returns a dict with
    'changed' and, when commands were sent, 'updates'.
    """
    config = get_config(module)

    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            # Legacy objects expose .configure(); newer ones .config().
            try:
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
+
def execute_config_command(commands, module):
    """Send config *commands* to the device and return the response.

    Tries the legacy module.configure() first; AttributeError means the
    newer connection object is in use, so fall back to the raw CLI shell
    (which requires an explicit leading 'configure'). Any shell error is
    reported via fail_json.
    """
    try:
        response = module.configure(commands)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(clie), commands=commands)
    except AttributeError:
        try:
            # NOTE: mutates the caller's list by prepending 'configure';
            # main() strips it back out of the reported updates.
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(clie), commands=commands)
    return response
+
+
def get_cli_body_ssh(command, response, module):
    """Normalize a transport=cli response into the nxapi-style body.

    A bare '^' marks a device syntax error (empty body); running-config
    and xml output pass through untouched; everything else must parse
    as JSON or the module fails.
    """
    first = response[0]
    if first == '^':
        return []
    if 'running' in command or 'xml' in first:
        return response
    try:
        return [json.loads(first)]
    except ValueError:
        module.fail_json(msg='Command does not support JSON output',
                         command=command)
+
+
def execute_show(cmds, module, command_type=None):
    """Run show *cmds* and return the raw response.

    Uses the legacy module.execute(); AttributeError means the newer
    connection object is in use, so translate the nxapi command_type to
    a cli output format and run through module.cli. Shell errors fail
    the module.
    """
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        # Fallback for connection objects that expose .cli instead.
        try:
            if command_type:
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                module.cli.add_commands(cmds, raw=True)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
+
+
def execute_show_command(command, module, command_type='cli_show'):
    """Run one show command over the active transport and return the
    parsed body (list).

    For cli transport the command is suffixed with '| json' and parsed
    by get_cli_body_ssh; for nxapi the command_type selects the format.
    NOTE(review): `body` is unbound for any other transport value — the
    argument spec presumably restricts transport to cli/nxapi; confirm.
    """
    if module.params['transport'] == 'cli':
        command += ' | json'
        cmds = [command]
        response = execute_show(cmds, module)
        body = get_cli_body_ssh(command, response, module)
    elif module.params['transport'] == 'nxapi':
        cmds = [command]
        body = execute_show(cmds, module, command_type=command_type)

    return body
+
+
def flatten_list(command_lists):
    """Flatten one level of nesting: list items are spliced in,
    scalars are kept as-is."""
    flattened = []
    for entry in command_lists:
        flattened.extend(entry if isinstance(entry, list) else [entry])
    return flattened
+
+
def get_portchannel_list(module):
    """Return the list of port-channel group ids configured on the
    device (empty when none exist)."""
    body = execute_show_command('show port-channel summary', module)

    try:
        rows = body[0]['TABLE_channel']['ROW_channel']
    except (KeyError, AttributeError):
        return []

    if not rows:
        return []
    # A single port-channel comes back as a dict rather than a list.
    if isinstance(rows, dict):
        rows = [rows]
    return [row['group'] for row in rows]
+
+
def get_existing_portchannel_to_vpc_mappings(module):
    """Map each configured vpc id (str) to its port-channel ifindex
    string (e.g. 'Po10'), from 'show vpc brief'."""
    body = execute_show_command('show vpc brief', module)

    try:
        vpc_table = body[0]['TABLE_vpc']['ROW_vpc']
    except (KeyError, AttributeError, TypeError):
        vpc_table = None

    mapping = {}
    if vpc_table:
        # A single vpc comes back as a dict rather than a list.
        if isinstance(vpc_table, dict):
            vpc_table = [vpc_table]
        for entry in vpc_table:
            mapping[str(entry['vpc-id'])] = str(entry['vpc-ifindex'])
    return mapping
+
+
def peer_link_exists(module):
    """Return True when any line of the vpc running-config section
    mentions 'peer-link'."""
    running = get_vpc_running_config(module)
    return any('peer-link' in line for line in running.split('\n'))
+
+
def get_vpc_running_config(module):
    """Return the 'vpc' section of the running config as raw text."""
    return execute_show_command('show running section vpc', module,
                                command_type='cli_show_ascii')[0]
+
+
def get_active_vpc_peer_link(module):
    """Return the peer-link ifindex (e.g. 'Po10') from 'show vpc brief',
    or None when no peer-link is configured."""
    body = execute_show_command('show vpc brief', module)
    try:
        return body[0]['TABLE_peerlink']['ROW_peerlink']['peerlink-ifindex']
    except (KeyError, AttributeError):
        return None
+
+
def get_portchannel_vpc_config(module, portchannel):
    """Return the vpc/peer-link state of one port-channel.

    Result keys: 'portchannel' (group id or ''), 'peer-link' (bool) and
    'vpc' (vpc id string, '' when none).
    """
    peer_link = False
    vpc = ""
    pc = ""
    config = {}

    body = execute_show_command('show vpc brief', module)

    try:
        table = body[0]['TABLE_peerlink']['ROW_peerlink']
    except (KeyError, AttributeError, TypeError):
        table = {}

    peer_link_pc = table.get('peerlink-ifindex', None) if table else None

    if peer_link_pc:
        # 'peerlink-ifindex' looks like 'PoNN'; strip the 'Po' prefix.
        plpc = str(peer_link_pc[2:])
        if portchannel == plpc:
            # BUGFIX: the original set config['peer-link'] = True here but
            # then unconditionally overwrote it with False below, so the
            # peer-link was never reported. Record it in the locals that
            # feed the final dict instead.
            peer_link = True
            pc = portchannel

    mapping = get_existing_portchannel_to_vpc_mappings(module)

    # .items() keeps this working on both Python 2 and 3 (the original
    # used the py2-only .iteritems()).
    for existing_vpc, port_channel in mapping.items():
        port_ch = str(port_channel[2:])
        if port_ch == portchannel:
            pc = port_ch
            vpc = str(existing_vpc)

    config['portchannel'] = pc
    config['peer-link'] = peer_link
    config['vpc'] = vpc

    return config
+
+
def get_commands_to_config_vpc_interface(portchannel, delta, config_value, existing):
    """Build the commands to (un)bind a vpc or peer-link on a port-channel.

    Args:
        portchannel: port-channel group id (string).
        delta: proposed-vs-existing differences (dict).
        config_value: vpc id, or the literal 'peer-link'.
        existing: current interface vpc state (dict).

    Returns:
        Command list (interface context first), or [] when nothing to do.
    """
    commands = []

    if delta.get('peer-link') is False and existing.get('peer-link') is True:
        # Peer-link is being turned off. (Removed a dead local `command`
        # assignment here — the literal was appended directly.)
        commands.append('no vpc peer-link')
        commands.insert(0, 'interface port-channel{0}'.format(portchannel))

    elif delta.get('peer-link') or not existing.get('vpc'):
        # Either enabling the peer-link (config_value == 'peer-link')
        # or assigning a vpc id to an interface that has none.
        commands.append('vpc {0}'.format(config_value))
        commands.insert(0, 'interface port-channel{0}'.format(portchannel))

    return commands
+
+
def main():
    """Module entry point: validate vpc/peer-link assignment against the
    device state and apply the needed interface commands."""
    argument_spec = dict(
        portchannel=dict(required=True, type='str'),
        vpc=dict(required=False, type='str'),
        peer_link=dict(required=False, type='bool'),
        state=dict(choices=['absent', 'present'], default='present'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                mutually_exclusive=[['vpc', 'peer_link']],
                                supports_check_mode=True)

    portchannel = module.params['portchannel']
    vpc = module.params['vpc']
    peer_link = module.params['peer_link']
    state = module.params['state']

    changed = False
    args = {'portchannel': portchannel, 'vpc': vpc, 'peer-link': peer_link}
    active_peer_link = None

    if portchannel not in get_portchannel_list(module):
        module.fail_json(msg="The portchannel you are trying to make a"
                             " VPC or PL is not created yet. "
                             "Create it first!")
    if vpc:
        mapping = get_existing_portchannel_to_vpc_mappings(module)

        # Re-assigning a vpc from one PC to another is unsupported.
        if vpc in mapping and portchannel != mapping[vpc].strip('Po'):
            module.fail_json(msg="This vpc is already configured on "
                                 "another portchannel. Remove it first "
                                 "before trying to assign it here. ",
                             existing_portchannel=mapping[vpc])

        # NOTE(review): dict.iteritems() is Python 2-only; .items() would
        # be needed for Python 3 (also used twice further down).
        for vpcid, existing_pc in mapping.iteritems():
            if portchannel == existing_pc.strip('Po') and vpcid != vpc:
                module.fail_json(msg="This portchannel already has another"
                                     " VPC configured. Remove it first "
                                     "before assigning this one",
                                 existing_vpc=vpcid)

        if peer_link_exists(module):
            active_peer_link = get_active_vpc_peer_link(module)
            # NOTE(review): [-2:] only matches 1-2 digit group numbers;
            # looks wrong for port-channels >= 100 — confirm.
            if active_peer_link[-2:] == portchannel:
                module.fail_json(msg="That port channel is the current "
                                     "PEER LINK. Remove it if you want it"
                                     " to be a VPC")
        config_value = vpc

    elif peer_link is not None:
        if peer_link_exists(module):
            # Strip the leading 'Po' from e.g. 'Po10'.
            active_peer_link = get_active_vpc_peer_link(module)[2::]
            if active_peer_link != portchannel:
                if peer_link:
                    module.fail_json(msg="A peer link already exists on"
                                         " the device. Remove it first",
                                     current_peer_link='Po{0}'.format(
                                         active_peer_link))
        config_value = 'peer-link'

    # NOTE(review): if neither vpc nor peer_link was supplied,
    # config_value is never bound and the delta branch below would raise.

    proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
    existing = get_portchannel_vpc_config(module, portchannel)
    end_state = existing
    commands = []

    if state == 'present':
        # Only act on parameters that differ from the device state.
        delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
        if delta:
            command = get_commands_to_config_vpc_interface(
                portchannel,
                delta,
                config_value,
                existing
            )
            commands.append(command)

    elif state == 'absent':
        if existing.get('vpc'):
            command = ['no vpc']
            commands.append(command)
        elif existing.get('peer-link'):
            command = ['no vpc peer-link']
            commands.append(command)
        if commands:
            commands.insert(0, ['interface port-channel{0}'.format(portchannel)])

    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report what would be sent, change nothing.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            output = execute_config_command(cmds, module)
            if module.params['transport'] == 'cli':
                output = ' '.join(output)
                if 'error' in output.lower():
                    module.fail_json(msg=output.replace('\n', ''))
            end_state = get_portchannel_vpc_config(module, portchannel)
            # The cli fallback prepends 'configure'; hide it from results.
            if 'configure' in cmds:
                cmds.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed

    module.exit_json(**results)
diff --git a/network/nxos/nxos_vrf.py b/network/nxos/nxos_vrf.py
index 4b1f8911375..eb60306e08a 100644
--- a/network/nxos/nxos_vrf.py
+++ b/network/nxos/nxos_vrf.py
@@ -16,13 +16,17 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: nxos_vrf
version_added: "2.1"
-short_description: Manages global VRF configuration
+short_description: Manages global VRF configuration.
description:
- - Manages global VRF configuration
+ - Manages global VRF configuration.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
@@ -30,11 +34,11 @@
notes:
- Cisco NX-OS creates the default VRF by itself. Therefore,
you're not allowed to use default as I(vrf) name in this module.
- - I(vrf) name must be shorter than 32 chars.
+ - C(vrf) name must be shorter than 32 chars.
- VRF names are not case sensible in NX-OS. Anyway, the name is stored
just like it's inserted by the user and it'll not be changed again
- unless the VRF is removed and re-created. i.e. I(vrf=NTC) will create
- a VRF named NTC, but running it again with I(vrf=ntc) will not cause
+ unless the VRF is removed and re-created. i.e. C(vrf=NTC) will create
+ a VRF named NTC, but running it again with C(vrf=ntc) will not cause
a configuration change.
options:
vrf:
@@ -47,6 +51,21 @@
required: false
default: up
choices: ['up','down']
+ vni:
+ description:
+ - Specify virtual network identifier. Valid values are Integer
+ or keyword 'default'.
+ required: false
+ default: null
+ version_added: "2.2"
+ route_distinguisher:
+ description:
+ - VPN Route Distinguisher (RD). Valid values are a string in
+ one of the route-distinguisher formats (ASN2:NN, ASN4:NN, or
+ IPV4:NN); the keyword 'auto', or the keyword 'default'.
+ required: false
+ default: null
+ version_added: "2.2"
state:
description:
- Manages desired state of the resource.
@@ -55,16 +74,18 @@
choices: ['present','absent']
description:
description:
- - Description of the VRF
+ - Description of the VRF.
required: false
default: null
'''
EXAMPLES = '''
-# ensure ntc VRF exists on switch
-- nxos_vrf: vrf=ntc host={{ inventory_hostname }}
-# ensure ntc VRF does not exist on switch
-- nxos_vrf: vrf=ntc host={{ inventory_hostname }} state=absent
+- name: Ensure ntc VRF exists on switch
+ nxos_vrf:
+ vrf: ntc
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
'''
RETURN = '''
@@ -85,11 +106,6 @@
type: dict
sample: {"admin_state": "Up", "description": "Test test",
"vrf": "ntc"}
-state:
- description: state as sent in from the playbook
- returned: always
- type: string
- sample: "present"
updates:
description: commands sent to the device
returned: always
@@ -102,6 +118,158 @@
sample: true
'''
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+import ansible.module_utils.nxos
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+from ansible.module_utils.network import NetworkModule
+
+
def to_list(val):
    """Normalize *val* to a list: lists/tuples are copied, None becomes
    an empty list, anything else is wrapped in a one-element list."""
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig extended with section expansion/lookup helpers used
    by the migration shim below (duplicated per-module by design)."""

    def expand_section(self, configobj, S=None):
        """Return *configobj* plus all of its descendants, depth-first."""
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        """Find the config line whose text and parent chain match *path*
        (a list of line texts); returns None when absent."""
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        """Join raw config lines of *section* into one text block."""
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        """Return the config section at *path* as text, or [] when the
        path does not exist."""
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        """Return the ConfigLine objects of the section at *path*.

        Raises ValueError when the path is not present in the config.
        """
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more configuration lines, creating any missing
        parent sections along the way."""

        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
+
+
def get_network_module(**kwargs):
    """Build the platform module object. In this module get_module is
    never imported, so the NameError path always selects NetworkModule."""
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
+
def get_config(module, include_defaults=False):
    """Return the running config as a CustomNetworkConfig tree.

    Prefers the user-supplied 'config' parameter; otherwise pulls the
    config from the device via whichever API the module object exposes
    (legacy .get_config() or newer .config.get_config()).
    """
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
+
def load_config(module, candidate):
    """Diff *candidate* against the running config and push the delta.

    Honors check mode (no device writes) and the 'save' parameter
    (copies running-config to startup-config). Returns a dict with
    'changed' and, when commands were sent, 'updates'.
    """
    config = get_config(module)

    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            # Legacy objects expose .configure(); newer ones .config().
            try:
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
+# END OF COMMON CODE
def execute_config_command(commands, module):
try:
@@ -110,9 +278,18 @@ def execute_config_command(commands, module):
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
-
-
-def get_cli_body_ssh_vrf(command, response):
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh_vrf(module, command, response):
"""Get response for when transport=cli. This is kind of a hack and mainly
needed because these modules were originally written for NX-API. And
not every command supports "| json" when using cli/ssh. As such, we assume
@@ -121,9 +298,9 @@ def get_cli_body_ssh_vrf(command, response):
when using multiple |.
"""
command_splitted = command.split('|')
- if len(command_splitted) > 2:
+ if len(command_splitted) > 2 or 'show run' in command:
body = response
- elif 'xml' in response[0]:
+ elif 'xml' in response[0] or response[0] == '\n':
body = []
else:
body = [json.loads(response[0])]
@@ -131,6 +308,11 @@ def get_cli_body_ssh_vrf(command, response):
def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
@@ -140,15 +322,29 @@ def execute_show(cmds, module, command_type=None):
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
return response
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
- command += ' | json'
+ if 'show run' not in command:
+ command += ' | json'
cmds = [command]
response = execute_show(cmds, module)
- body = get_cli_body_ssh_vrf(command, response)
+ body = get_cli_body_ssh_vrf(module, command, response)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = execute_show(cmds, module, command_type=command_type)
@@ -180,6 +376,10 @@ def get_commands_to_config_vrf(delta, vrf):
command = 'no shutdown'
elif value.lower() == 'down':
command = 'shutdown'
+ elif param == 'rd':
+ command = 'rd {0}'.format(value)
+ elif param == 'vni':
+ command = 'vni {0}'.format(value)
if command:
commands.append(command)
if commands:
@@ -188,13 +388,12 @@ def get_commands_to_config_vrf(delta, vrf):
def get_vrf_description(vrf, module):
- cmd_type = 'cli_show_ascii'
- command = ('show run section vrf | begin ^vrf\scontext\s{0} '
- '| end ^vrf.*'.format(vrf))
+ command_type = 'cli_show_ascii'
+ command = (r'show run section vrf | begin ^vrf\scontext\s{0} | end ^vrf.*'.format(vrf))
description = ''
- descr_regex = ".*description\s(?P[\S+\s]+).*"
- body = execute_show_command(command, module, cmd_type)
+ descr_regex = r".*description\s(?P[\S+\s]+).*"
+ body = execute_show_command(command, module, command_type)
try:
body = body[0]
@@ -212,6 +411,14 @@ def get_vrf_description(vrf, module):
return description
def get_value(arg, config, module):
    """Extract the value that follows keyword *arg* in running-config text.

    Args:
        arg: config keyword, e.g. 'vni', 'rd' or 'description'.
        config: running-config section text to scan.
        module: unused; kept for call-site compatibility.

    Returns:
        The text after '<arg> ' on the first matching line, or '' when
        the keyword does not appear at all.
    """
    # BUGFIX: the named group was mangled to '(?P.*)' (markup stripping);
    # group('value') below requires an explicit (?P<value>...) group.
    REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(arg), re.M)
    value = ''
    if arg in config:
        value = REGEX.search(config).group('value')
    return value
+
+
def get_vrf(vrf, module):
command = 'show vrf {0}'.format(vrf)
vrf_key = {
@@ -220,15 +427,18 @@ def get_vrf(vrf, module):
}
body = execute_show_command(command, module)
-
try:
vrf_table = body[0]['TABLE_vrf']['ROW_vrf']
except (TypeError, IndexError):
return {}
parsed_vrf = apply_key_map(vrf_key, vrf_table)
- parsed_vrf['description'] = get_vrf_description(
- parsed_vrf['vrf'], module)
+
+ command = 'show run all | section vrf.context.{0}'.format(vrf)
+ body = execute_show_command(command, module, 'cli_show_ascii')
+ extra_params = ['vni', 'rd', 'description']
+ for param in extra_params:
+ parsed_vrf[param] = get_value(param, body[0], module)
return parsed_vrf
@@ -237,17 +447,24 @@ def main():
argument_spec = dict(
vrf=dict(required=True),
description=dict(default=None, required=False),
+ vni=dict(required=False, type='str'),
+ rd=dict(required=False, type='str'),
admin_state=dict(default='up', choices=['up', 'down'],
required=False),
state=dict(default='present', choices=['present', 'absent'],
required=False),
+ include_defaults=dict(default=False),
+ config=dict(),
+ save=dict(type='bool', default=False)
)
- module = get_module(argument_spec=argument_spec,
- supports_check_mode=True)
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
vrf = module.params['vrf']
admin_state = module.params['admin_state'].lower()
description = module.params['description']
+ rd = module.params['rd']
+ vni = module.params['vni']
state = module.params['state']
if vrf == 'default':
@@ -257,8 +474,8 @@ def main():
vrf=vrf)
existing = get_vrf(vrf, module)
- args = dict(vrf=vrf, description=description,
- admin_state=admin_state)
+ args = dict(vrf=vrf, description=description, vni=vni,
+ admin_state=admin_state, rd=rd)
end_state = existing
changed = False
@@ -289,28 +506,27 @@ def main():
commands.extend(command)
if commands:
+ if proposed.get('vni'):
+ if existing.get('vni') and existing.get('vni') != '':
+ commands.insert(1, 'no vni {0}'.format(existing['vni']))
if module.check_mode:
- module.exit_json(changed=True, commands=cmds)
+ module.exit_json(changed=True, commands=commands)
else:
execute_config_command(commands, module)
changed = True
end_state = get_vrf(vrf, module)
+ if 'configure' in commands:
+ commands.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
- results['state'] = state
results['updates'] = commands
results['changed'] = changed
module.exit_json(**results)
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.netcfg import *
-from ansible.module_utils.nxos import *
if __name__ == '__main__':
main()
diff --git a/network/nxos/nxos_vrf_af.py b/network/nxos/nxos_vrf_af.py
new file mode 100644
index 00000000000..70ca67109e7
--- /dev/null
+++ b/network/nxos/nxos_vrf_af.py
@@ -0,0 +1,436 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_vrf_af
+version_added: "2.2"
+short_description: Manages VRF AF.
+description:
+ - Manages VRF AF
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - Default, where supported, restores params default value.
+options:
+ vrf:
+ description:
+ - Name of the VRF.
+ required: true
+ afi:
+ description:
+ - Address-Family Identifier (AFI).
+ required: true
+ choices: ['ipv4', 'ipv6']
+ default: null
+ safi:
+ description:
+ - Sub Address-Family Identifier (SAFI).
+ required: true
+ choices: ['unicast', 'multicast']
+ default: null
+ route_target_both_auto_evpn:
+ description:
+ - Enable/Disable the EVPN route-target 'auto' setting for both
+ import and export target communities.
+ required: false
+ choices: ['true', 'false']
+ default: null
+ state:
+ description:
+ - Determines whether the config should be present or
+ not on the device.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+EXAMPLES = '''
+- nxos_vrf_af:
+    vrf: ntc
+    afi: ipv4
+    safi: unicast
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"afi": "ipv4", "route_target_both_auto_evpn": true,
+ "safi": "unicast", "vrf": "test"}
+existing:
+ description: k/v pairs of existing configuration
+ returned: verbose mode
+ type: dict
+ sample: {"afi": "ipv4", "route_target_both_auto_evpn": false,
+ "safi": "unicast", "vrf": "test"}
+end_state:
+ description: k/v pairs of configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"afi": "ipv4", "route_target_both_auto_evpn": true,
+ "safi": "unicast", "vrf": "test"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["vrf context test", "address-family ipv4 unicast",
+ "route-target both auto evpn"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+BOOL_PARAMS = ['route_target_both_auto_evpn']
+PARAM_TO_COMMAND_KEYMAP = {
+ 'route_target_both_auto_evpn': 'route-target both auto evpn',
+}
+PARAM_TO_DEFAULT_KEYMAP = {}
+WARNINGS = []
+
+def invoke(name, *args, **kwargs):
+ func = globals().get(name)
+ if func:
+ return func(*args, **kwargs)
+
+
+def get_value(arg, config, module):
+ if arg in BOOL_PARAMS:
+ REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
+ value = False
+ try:
+ if REGEX.search(config):
+ value = True
+ except TypeError:
+ value = False
+ else:
+        REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
+ value = ''
+ if PARAM_TO_COMMAND_KEYMAP[arg] in config:
+ value = REGEX.search(config).group('value')
+ return value
+
+
+def get_existing(module, args):
+ existing = {}
+ netcfg = get_config(module)
+
+ parents = ['vrf context {0}'.format(module.params['vrf'])]
+ parents.append('address-family {0} {1}'.format(module.params['afi'],
+ module.params['safi']))
+ config = netcfg.get_section(parents)
+ if config:
+ splitted_config = config.splitlines()
+ vrf_index = False
+ for index in range(0, len(splitted_config) - 1):
+ if 'vrf' in splitted_config[index].strip():
+ vrf_index = index
+ break
+ if vrf_index:
+ config = '\n'.join(splitted_config[0:vrf_index])
+
+ for arg in args:
+ if arg not in ['afi', 'safi', 'vrf']:
+ existing[arg] = get_value(arg, config, module)
+
+ existing['afi'] = module.params['afi']
+ existing['safi'] = module.params['safi']
+ existing['vrf'] = module.params['vrf']
+
+ return existing
+
+
+def apply_key_map(key_map, table):
+ new_dict = {}
+ for key, value in table.items():
+ new_key = key_map.get(key)
+ if new_key:
+ value = table.get(key)
+ if value:
+ new_dict[new_key] = value
+ else:
+ new_dict[new_key] = value
+ return new_dict
+
+
+def state_present(module, existing, proposed, candidate):
+ commands = list()
+ proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
+ existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
+
+ for key, value in proposed_commands.iteritems():
+ if value is True:
+ commands.append(key)
+
+ elif value is False:
+ commands.append('no {0}'.format(key))
+
+ elif value == 'default':
+ if existing_commands.get(key):
+ existing_value = existing_commands.get(key)
+ commands.append('no {0} {1}'.format(key, existing_value))
+ else:
+ command = '{0} {1}'.format(key, value.lower())
+ commands.append(command)
+
+ if commands:
+ parents = ['vrf context {0}'.format(module.params['vrf'])]
+ parents.append('address-family {0} {1}'.format(module.params['afi'],
+ module.params['safi']))
+ candidate.add(commands, parents=parents)
+
+
+def state_absent(module, existing, proposed, candidate):
+ commands = []
+ parents = ['vrf context {0}'.format(module.params['vrf'])]
+ commands.append('no address-family {0} {1}'.format(module.params['afi'],
+ module.params['safi']))
+ candidate.add(commands, parents=parents)
+
+
+def main():
+ argument_spec = dict(
+ vrf=dict(required=True, type='str'),
+ safi=dict(required=True, type='str', choices=['unicast','multicast']),
+ afi=dict(required=True, type='str', choices=['ipv4','ipv6']),
+ route_target_both_auto_evpn=dict(required=False, type='bool'),
+ m_facts=dict(required=False, default=False, type='bool'),
+ state=dict(choices=['present', 'absent'], default='present',
+ required=False),
+ include_defaults=dict(default=False),
+ config=dict(),
+ save=dict(type='bool', default=False)
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ state = module.params['state']
+
+ args = [
+ 'vrf',
+ 'safi',
+ 'afi',
+ 'route_target_both_auto_evpn'
+ ]
+
+ existing = invoke('get_existing', module, args)
+ end_state = existing
+ proposed_args = dict((k, v) for k, v in module.params.iteritems()
+ if v is not None and k in args)
+
+ proposed = {}
+ for key, value in proposed_args.iteritems():
+ if key != 'interface':
+ if str(value).lower() == 'default':
+ value = PARAM_TO_DEFAULT_KEYMAP.get(key)
+ if value is None:
+ value = 'default'
+ if existing.get(key) or (not existing.get(key) and value):
+ proposed[key] = value
+
+ result = {}
+ if state == 'present' or (state == 'absent' and existing):
+ candidate = CustomNetworkConfig(indent=3)
+ invoke('state_%s' % state, module, existing, proposed, candidate)
+
+ try:
+ response = load_config(module, candidate)
+ result.update(response)
+ except ShellError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc))
+ else:
+ result['updates'] = []
+
+ result['connected'] = module.connected
+ if module._verbosity > 0:
+ end_state = invoke('get_existing', module, args)
+ result['end_state'] = end_state
+ result['existing'] = existing
+ result['proposed'] = proposed_args
+
+ if WARNINGS:
+ result['warnings'] = WARNINGS
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_vrf_interface.py b/network/nxos/nxos_vrf_interface.py
index 4930aff7115..a3420307a6f 100644
--- a/network/nxos/nxos_vrf_interface.py
+++ b/network/nxos/nxos_vrf_interface.py
@@ -16,13 +16,17 @@
 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: nxos_vrf_interface
version_added: "2.1"
-short_description: Manages interface specific VRF configuration
+short_description: Manages interface specific VRF configuration.
description:
- - Manages interface specific VRF configuration
+ - Manages interface specific VRF configuration.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
@@ -41,7 +45,7 @@
required: true
interface:
description:
- - Full name of interface to be managed, i.e. I(Ethernet1/1).
+ - Full name of interface to be managed, i.e. Ethernet1/1.
required: true
state:
description:
@@ -52,10 +56,19 @@
'''
EXAMPLES = '''
-# ensure vrf ntc exists on Eth1/1
-- nxos_vrf_interface: vrf=ntc interface=Ethernet1/1 host={{ inventory_hostname }} state=present
-# ensure ntc VRF does not exist on Eth1/1
-- nxos_vrf_interface: vrf=ntc interface=Ethernet1/1 host={{ inventory_hostname }} state=absent
+- name: Ensure vrf ntc exists on Eth1/1
+ nxos_vrf_interface:
+ vrf: ntc
+ interface: Ethernet1/1
+ host: 68.170.147.165
+ state: present
+
+- name: Ensure ntc VRF does not exist on Eth1/1
+ nxos_vrf_interface:
+ vrf: ntc
+ interface: Ethernet1/1
+ host: 68.170.147.165
+ state: absent
'''
RETURN = '''
@@ -73,11 +86,6 @@
returned: always
type: dict
sample: {"interface": "loopback16", "vrf": "ntc"}
-state:
- description: state as sent in from the playbook
- returned: always
- type: string
- sample: "present"
updates:
description: commands sent to the device
returned: always
@@ -90,6 +98,164 @@
sample: true
'''
+import json
+import collections
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+WARNINGS = []
def execute_config_command(commands, module):
try:
@@ -98,6 +264,15 @@ def execute_config_command(commands, module):
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
def get_cli_body_ssh_vrf_interface(command, response, module):
@@ -116,6 +291,11 @@ def get_cli_body_ssh_vrf_interface(command, response, module):
def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
@@ -123,14 +303,28 @@ def execute_show(cmds, module, command_type=None):
response = module.execute(cmds)
except ShellError:
clie = get_exception()
- module.fail_json(msg='Error sending {0}'.format(command),
+ module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
return response
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
- command += ' | json'
+ if 'show run' not in command:
+ command += ' | json'
cmds = [command]
response = execute_show(cmds, module)
body = get_cli_body_ssh_vrf_interface(command, response, module)
@@ -191,17 +385,16 @@ def get_vrf_list(module):
def get_interface_info(interface, module):
- command = 'show run interface {0}'.format(interface)
+ command = 'show run | section interface.{0}'.format(interface.capitalize())
     vrf_regex = ".*vrf\s+member\s+(?P<vrf>\S+).*"
try:
body = execute_show_command(command, module,
command_type='cli_show_ascii')[0]
-
match_vrf = re.match(vrf_regex, body, re.DOTALL)
group_vrf = match_vrf.groupdict()
vrf = group_vrf["vrf"]
- except AttributeError:
+ except (AttributeError, TypeError):
return ""
return vrf
@@ -229,9 +422,12 @@ def main():
interface=dict(type='str', required=True),
state=dict(default='present', choices=['present', 'absent'],
required=False),
+ include_defaults=dict(default=False),
+ config=dict(),
+ save=dict(type='bool', default=False)
)
- module = get_module(argument_spec=argument_spec,
- supports_check_mode=True)
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
vrf = module.params['vrf']
interface = module.params['interface'].lower()
@@ -239,9 +435,8 @@ def main():
current_vrfs = get_vrf_list(module)
if vrf not in current_vrfs:
- module.fail_json(msg="Ensure the VRF you're trying to config/remove on"
- " an interface is created globally on the device"
- " first.")
+ WARNINGS.append("The VRF is not present/active on the device. "
+ "Use nxos_vrf to fix this.")
intf_type = get_interface_type(interface)
if (intf_type != 'ethernet' and module.params['transport'] == 'cli'):
@@ -293,22 +488,21 @@ def main():
changed = True
changed_vrf = get_interface_info(interface, module)
end_state = dict(interface=interface, vrf=changed_vrf)
+ if 'configure' in commands:
+ commands.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
- results['state'] = state
results['updates'] = commands
results['changed'] = changed
+ if WARNINGS:
+ results['warnings'] = WARNINGS
+
module.exit_json(**results)
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.netcfg import *
-from ansible.module_utils.nxos import *
if __name__ == '__main__':
- main()
+ main()
\ No newline at end of file
diff --git a/network/nxos/nxos_vrrp.py b/network/nxos/nxos_vrrp.py
index 6c65295336b..58c04a8367a 100644
--- a/network/nxos/nxos_vrrp.py
+++ b/network/nxos/nxos_vrrp.py
@@ -17,28 +17,31 @@
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
-
module: nxos_vrrp
version_added: "2.1"
-short_description: Manages VRRP configuration on NX-OS switches
+short_description: Manages VRRP configuration on NX-OS switches.
description:
- - Manages VRRP configuration on NX-OS switches
+ - Manages VRRP configuration on NX-OS switches.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
- - Gabriele Gerbino (@GGabriele)
+ - Gabriele Gerbino (@GGabriele)
notes:
- VRRP feature needs to be enabled first on the system.
- SVIs must exist before using this module.
- Interface must be a L3 port before using this module.
- - C(state=absent) removes the vrrp group if it exists on the device.
+ - C(state=absent) removes the VRRP group if it exists on the device.
- VRRP cannot be configured on loopback interfaces.
options:
group:
description:
- - The VRRP group number.
+ - VRRP group number.
required: true
interface:
description:
@@ -51,7 +54,7 @@
default: null
vip:
description:
- - HSRP virtual IP address.
+ - VRRP virtual IP address.
required: false
default: null
authentication:
@@ -59,6 +62,13 @@
- Clear text authentication string.
required: false
default: null
+ admin_state:
+ description:
+ - Used to enable or disable the VRRP process.
+ required: false
+ choices: ['shutdown', 'no shutdown']
+ default: no shutdown
+ version_added: "2.2"
state:
description:
- Specify desired state of the resource.
@@ -68,16 +78,31 @@
'''
EXAMPLES = '''
-
-# ensure vrrp group 100 and vip 10.1.100.1 is on vlan10
-- nxos_vrrp: interface=vlan10 group=100 vip=10.1.100.1 host={{ inventory_hostname }}
-
-# ensure removal of the vrrp group config # vip is required to ensure the user knows what they are removing
-- nxos_vrrp: interface=vlan10 group=100 vip=10.1.100.1 state=absent host={{ inventory_hostname }}
-
-# re-config with more params
-- nxos_vrrp: interface=vlan10 group=100 vip=10.1.100.1 preempt=false priority=130 authentication=AUTHKEY host={{ inventory_hostname }}
-
+- name: Ensure vrrp group 100 and vip 10.1.100.1 is on vlan10
+ nxos_vrrp:
+ interface: vlan10
+ group: 100
+ vip: 10.1.100.1
+ host: 68.170.147.165
+
+- name: Ensure removal of the vrrp group config
+ # vip is required to ensure the user knows what they are removing
+ nxos_vrrp:
+ interface: vlan10
+ group: 100
+ vip: 10.1.100.1
+ state: absent
+ host: 68.170.147.165
+
+- name: Re-config with more params
+ nxos_vrrp:
+ interface: vlan10
+ group: 100
+ vip: 10.1.100.1
+ preempt: false
+ priority: 130
+ authentication: AUTHKEY
+ host: 68.170.147.165
'''
RETURN = '''
@@ -85,7 +110,8 @@
description: k/v pairs of parameters passed into module
returned: always
type: dict
- sample: {"authentication": "testing", "group": "150", "vip": "10.1.15.1"}
+ sample: {"authentication": "testing", "group": "150", "vip": "10.1.15.1",
+ "admin_state": "no shutdown"}
existing:
description: k/v pairs of existing vrrp info on the interface
type: dict
@@ -95,18 +121,14 @@
returned: always
type: dict
sample: {"authentication": "testing", "group": "150", "interval": "1",
- "preempt": true, "priority": "100", "vip": "10.1.15.1"}
-state:
- description: state as sent in from the playbook
- returned: always
- type: string
- sample: "present"
+ "preempt": true, "priority": "100", "vip": "10.1.15.1",
+ "admin_state": "no shutdown"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface vlan10", "vrrp 150", "address 10.1.15.1",
- "authentication text testing"]
+ "authentication text testing", "no shutdown"]
changed:
description: check to see if a change was made on the device
returned: always
@@ -114,6 +136,162 @@
sample: true
'''
+import json
+import collections
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
def execute_config_command(commands, module):
try:
@@ -122,6 +300,15 @@ def execute_config_command(commands, module):
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
def get_cli_body_ssh_vrrp(command, response, module):
@@ -147,6 +334,11 @@ def get_cli_body_ssh_vrrp(command, response, module):
def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
@@ -154,11 +346,25 @@ def execute_show(cmds, module, command_type=None):
response = module.execute(cmds)
except ShellError:
clie = get_exception()
- module.fail_json(msg='Error sending {0}'.format(command),
+ module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
return response
+
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
command += ' | json'
@@ -223,19 +429,43 @@ def get_interface_mode(interface, intf_type, module):
command = 'show interface {0}'.format(interface)
interface = {}
mode = 'unknown'
+ body = execute_show_command(command, module)[0]
+ interface_table = body['TABLE_interface']['ROW_interface']
+ name = interface_table.get('interface')
if intf_type in ['ethernet', 'portchannel']:
- body = execute_show_command(command, module)[0]
- interface_table = body['TABLE_interface']['ROW_interface']
mode = str(interface_table.get('eth_mode', 'layer3'))
+
if mode == 'access' or mode == 'trunk':
mode = 'layer2'
elif intf_type == 'svi':
mode = 'layer3'
- return mode
+ return mode, name
-def get_existing_vrrp(interface, group, module):
+
+def get_vrr_status(group, module, interface):
+ command = 'show run all | section interface.{0}$'.format(interface)
+ body = execute_show_command(command, module, command_type='cli_show_ascii')[0]
+ vrf_index = None
+ admin_state = 'shutdown'
+
+ if body:
+ splitted_body = body.splitlines()
+ for index in range(0, len(splitted_body) - 1):
+ if splitted_body[index].strip() == 'vrrp {0}'.format(group):
+ vrf_index = index
+ vrf_section = splitted_body[vrf_index::]
+
+ for line in vrf_section:
+ if line.strip() == 'no shutdown':
+ admin_state = 'no shutdown'
+ break
+
+ return admin_state
+
+
+def get_existing_vrrp(interface, group, module, name):
command = 'show vrrp detail interface {0}'.format(interface)
body = execute_show_command(command, module)
vrrp = {}
@@ -267,6 +497,8 @@ def get_existing_vrrp(interface, group, module):
parsed_vrrp['preempt'] = True
if parsed_vrrp['group'] == group:
+ parsed_vrrp['admin_state'] = get_vrr_status(group, module, name)
+
return parsed_vrrp
return vrrp
@@ -287,6 +519,7 @@ def get_commands_config_vrrp(delta, group):
preempt = delta.get('preempt')
interval = delta.get('interval')
auth = delta.get('authentication')
+ admin_state = delta.get('admin_state')
if vip:
commands.append((CMDS.get('vip')).format(vip))
@@ -300,6 +533,8 @@ def get_commands_config_vrrp(delta, group):
commands.append((CMDS.get('interval')).format(interval))
if auth:
commands.append((CMDS.get('auth')).format(auth))
+ if admin_state:
+ commands.append(admin_state)
commands.insert(0, 'vrrp {0}'.format(group))
@@ -340,14 +575,20 @@ def main():
group=dict(required=True, type='str'),
interface=dict(required=True),
priority=dict(required=False, type='str'),
- preempt=dict(required=False, choices=BOOLEANS, type='bool'),
+ preempt=dict(required=False, type='bool'),
vip=dict(required=False, type='str'),
+ admin_state=dict(required=False, type='str',
+ choices=['shutdown', 'no shutdown'],
+ default='no shutdown'),
authentication=dict(required=False, type='str'),
state=dict(choices=['absent', 'present'],
required=False, default='present'),
+ include_defaults=dict(default=False),
+ config=dict(),
+ save=dict(type='bool', default=False)
)
- module = get_module(argument_spec=argument_spec,
- supports_check_mode=True)
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
state = module.params['state']
interface = module.params['interface'].lower()
@@ -356,6 +597,7 @@ def main():
preempt = module.params['preempt']
vip = module.params['vip']
authentication = module.params['authentication']
+ admin_state = module.params['admin_state']
transport = module.params['transport']
@@ -371,16 +613,17 @@ def main():
module.fail_json(msg="Loopback interfaces don't support VRRP.",
interface=interface)
- mode = get_interface_mode(interface, intf_type, module)
+ mode, name = get_interface_mode(interface, intf_type, module)
if mode == 'layer2':
module.fail_json(msg='That interface is a layer2 port.\nMake it '
'a layer 3 port first.', interface=interface)
args = dict(group=group, priority=priority, preempt=preempt,
- vip=vip, authentication=authentication)
+ vip=vip, authentication=authentication,
+ admin_state=admin_state)
proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
- existing = get_existing_vrrp(interface, group, module)
+ existing = get_existing_vrrp(interface, group, module, name)
changed = False
end_state = existing
@@ -407,12 +650,13 @@ def main():
else:
execute_config_command(cmds, module)
changed = True
- end_state = get_existing_vrrp(interface, group, module)
+ end_state = get_existing_vrrp(interface, group, module, name)
+ if 'configure' in cmds:
+ cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
- results['state'] = state
results['updates'] = cmds
results['changed'] = changed
results['end_state'] = end_state
@@ -420,10 +664,5 @@ def main():
module.exit_json(**results)
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.netcfg import *
-from ansible.module_utils.nxos import *
if __name__ == '__main__':
main()
diff --git a/network/nxos/nxos_vtp_domain.py b/network/nxos/nxos_vtp_domain.py
new file mode 100644
index 00000000000..f96db115a6b
--- /dev/null
+++ b/network/nxos/nxos_vtp_domain.py
@@ -0,0 +1,423 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_vtp_domain
+version_added: "2.2"
+short_description: Manages VTP domain configuration.
+description:
+ - Manages VTP domain configuration.
+extends_documentation_fragment: nxos
+author:
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - VTP feature must be active on the device to use this module.
+ - This module is used to manage only VTP domain names.
+ - VTP domain names are case-sensitive.
+ - If it's never been configured before, VTP version is set to 1 by default.
+ Otherwise, it leaves the previous configured version untouched.
+ Use M(nxos_vtp_version) to change it.
+ - Use this in combination with M(nxos_vtp_password) and M(nxos_vtp_version)
+ to fully manage VTP operations.
+options:
+ domain:
+ description:
+ - VTP domain name.
+ required: true
+'''
+
+EXAMPLES = '''
+# ENSURE VTP DOMAIN IS CONFIGURED
+- nxos_vtp_domain:
+ domain: ntc
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"domain": "ntc"}
+existing:
+ description:
+ - k/v pairs of existing vtp domain
+ type: dict
+ sample: {"domain": "testing", "version": "2", "vtp_password": "\"}
+end_state:
+ description: k/v pairs of vtp domain after module execution
+ returned: always
+ type: dict
+ sample: {"domain": "ntc", "version": "2", "vtp_password": "\"}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["vtp domain ntc"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh(command, response, module):
+ """Get response for when transport=cli. This is kind of a hack and mainly
+ needed because these modules were originally written for NX-API. And
+ not every command supports "| json" when using cli/ssh. As such, we assume
+ if | json returns an XML string, it is a valid command, but that the
+ resource doesn't exist yet. Instead, the output will be a raw string
+ when issuing commands containing 'show run'.
+ """
+ if 'xml' in response[0] or response[0] == '\n':
+ body = []
+ elif 'status' in command:
+ body = response
+ else:
+ try:
+ body = [json.loads(response[0])]
+ except ValueError:
+ module.fail_json(msg='Command does not support JSON output',
+ command=command)
+ return body
+
+
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+ if module.params['transport'] == 'cli':
+ if 'status' not in command:
+ command += ' | json'
+ cmds = [command]
+ response = execute_show(cmds, module)
+ body = get_cli_body_ssh(command, response, module)
+ elif module.params['transport'] == 'nxapi':
+ cmds = [command]
+ body = execute_show(cmds, module, command_type=command_type)
+
+ return body
+
+
+def flatten_list(command_lists):
+ flat_command_list = []
+ for command in command_lists:
+ if isinstance(command, list):
+ flat_command_list.extend(command)
+ else:
+ flat_command_list.append(command)
+ return flat_command_list
+
+
+def get_vtp_config(module):
+ command = 'show vtp status'
+
+ body = execute_show_command(
+ command, module, command_type='cli_show_ascii')[0]
+ vtp_parsed = {}
+
+ if body:
+ version_regex = '.*VTP version running\s+:\s+(?P<version>\d).*'
+ domain_regex = '.*VTP Domain Name\s+:\s+(?P<domain>\S+).*'
+
+ try:
+ match_version = re.match(version_regex, body, re.DOTALL)
+ version = match_version.groupdict()['version']
+ except AttributeError:
+ version = ''
+
+ try:
+ match_domain = re.match(domain_regex, body, re.DOTALL)
+ domain = match_domain.groupdict()['domain']
+ except AttributeError:
+ domain = ''
+
+ if domain and version:
+ vtp_parsed['domain'] = domain
+ vtp_parsed['version'] = version
+ vtp_parsed['vtp_password'] = get_vtp_password(module)
+
+ return vtp_parsed
+
+
+def get_vtp_password(module):
+ command = 'show vtp password'
+ body = execute_show_command(command, module)[0]
+ password = body['passwd']
+ if password:
+ return str(password)
+ else:
+ return ""
+
+
+def main():
+ argument_spec = dict(
+ domain=dict(type='str', required=True),
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ domain = module.params['domain']
+
+ existing = get_vtp_config(module)
+ end_state = existing
+
+ args = dict(domain=domain)
+
+ changed = False
+ proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
+ delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
+
+ commands = []
+ if delta:
+ commands.append(['vtp domain {0}'.format(domain)])
+
+ cmds = flatten_list(commands)
+ if cmds:
+ if module.check_mode:
+ module.exit_json(changed=True, commands=cmds)
+ else:
+ changed = True
+ execute_config_command(cmds, module)
+ end_state = get_vtp_config(module)
+ if 'configure' in cmds:
+ cmds.pop(0)
+
+ results = {}
+ results['proposed'] = proposed
+ results['existing'] = existing
+ results['end_state'] = end_state
+ results['updates'] = cmds
+ results['changed'] = changed
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_vtp_password.py b/network/nxos/nxos_vtp_password.py
new file mode 100644
index 00000000000..12c142c2fc7
--- /dev/null
+++ b/network/nxos/nxos_vtp_password.py
@@ -0,0 +1,480 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+
+module: nxos_vtp_password
+version_added: "2.2"
+short_description: Manages VTP password configuration.
+description:
+ - Manages VTP password configuration.
+extends_documentation_fragment: nxos
+author:
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - VTP feature must be active on the device to use this module.
+ - This module is used to manage only VTP passwords.
+ - Use this in combination with M(nxos_vtp_domain) and M(nxos_vtp_version)
+ to fully manage VTP operations.
+ - You can set/remove password only if a VTP domain already exists.
+ - If C(state=absent) and no C(vtp_password) is provided, it removes the current
+ VTP password.
+ - If C(state=absent) and C(vtp_password) is provided, the proposed C(vtp_password)
+ has to match the existing one in order to remove it.
+options:
+ vtp_password:
+ description:
+ - VTP password
+ required: false
+ default: null
+ state:
+ description:
+ - Manage the state of the resource
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+
+EXAMPLES = '''
+# ENSURE VTP PASSWORD IS SET
+- nxos_vtp_password:
+ vtp_password: ntc
+ state: present
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+
+# ENSURE VTP PASSWORD IS REMOVED
+- nxos_vtp_password:
+ vtp_password: ntc
+ state: absent
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"vtp_password": "new_ntc"}
+existing:
+ description:
+ - k/v pairs of existing vtp
+ type: dict
+ sample: {"domain": "ntc", "version": "1", "vtp_password": "ntc"}
+end_state:
+ description: k/v pairs of vtp after module execution
+ returned: always
+ type: dict
+ sample: {"domain": "ntc", "version": "1", "vtp_password": "new_ntc"}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["vtp password new_ntc"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
+def get_cli_body_ssh(command, response, module):
+ """Get response for when transport=cli. This is kind of a hack and mainly
+ needed because these modules were originally written for NX-API. And
+ not every command supports "| json" when using cli/ssh. As such, we assume
+ if | json returns an XML string, it is a valid command, but that the
+ resource doesn't exist yet. Instead, the output will be a raw string
+ when issuing commands containing 'show run'.
+ """
+ if 'xml' in response[0] or response[0] == '\n':
+ body = []
+ elif 'show run' in command:
+ body = response
+ else:
+ try:
+ body = [json.loads(response[0])]
+ except ValueError:
+ module.fail_json(msg='Command does not support JSON output',
+ command=command)
+ return body
+
+
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
+def execute_show_command(command, module, command_type='cli_show'):
+ if module.params['transport'] == 'cli':
+ if 'show run' not in command:
+ command += ' | json'
+ cmds = [command]
+ response = execute_show(cmds, module)
+ body = get_cli_body_ssh(command, response, module)
+ elif module.params['transport'] == 'nxapi':
+ cmds = [command]
+ body = execute_show(cmds, module, command_type=command_type)
+
+ return body
+
+
+def flatten_list(command_lists):
+ flat_command_list = []
+ for command in command_lists:
+ if isinstance(command, list):
+ flat_command_list.extend(command)
+ else:
+ flat_command_list.append(command)
+ return flat_command_list
+
+
+def apply_key_map(key_map, table):
+ new_dict = {}
+ for key, value in table.items():
+ new_key = key_map.get(key)
+ if new_key:
+ value = table.get(key)
+ if value:
+ new_dict[new_key] = str(value)
+ else:
+ new_dict[new_key] = value
+ return new_dict
+
+
+def get_vtp_config(module):
+ command = 'show vtp status'
+
+ body = execute_show_command(
+ command, module, command_type='cli_show_ascii')[0]
+ vtp_parsed = {}
+
+ if body:
+ version_regex = '.*VTP version running\s+:\s+(?P<version>\d).*'
+ domain_regex = '.*VTP Domain Name\s+:\s+(?P<domain>\S+).*'
+
+ try:
+ match_version = re.match(version_regex, body, re.DOTALL)
+ version = match_version.groupdict()['version']
+ except AttributeError:
+ version = ''
+
+ try:
+ match_domain = re.match(domain_regex, body, re.DOTALL)
+ domain = match_domain.groupdict()['domain']
+ except AttributeError:
+ domain = ''
+
+ if domain and version:
+ vtp_parsed['domain'] = domain
+ vtp_parsed['version'] = version
+ vtp_parsed['vtp_password'] = get_vtp_password(module)
+
+ return vtp_parsed
+
+
+def get_vtp_password(module):
+ command = 'show vtp password'
+ body = execute_show_command(command, module)[0]
+ password = body['passwd']
+ if password:
+ return str(password)
+ else:
+ return ""
+
+
+def main():
+ argument_spec = dict(
+ vtp_password=dict(type='str', no_log=True),
+ state=dict(choices=['absent', 'present'],
+ default='present'),
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ vtp_password = module.params['vtp_password'] or None
+ state = module.params['state']
+
+ existing = get_vtp_config(module)
+ end_state = existing
+
+ args = dict(vtp_password=vtp_password)
+
+ changed = False
+ proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
+ delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
+
+ commands = []
+ if state == 'absent':
+ if vtp_password is not None:
+ if existing['vtp_password'] == proposed['vtp_password']:
+ commands.append(['no vtp password'])
+ else:
+ module.fail_json(msg="Proposed vtp password doesn't match "
+ "current vtp password. It cannot be "
+ "removed when state=absent. If you are "
+ "trying to change the vtp password, use "
+ "state=present.")
+ else:
+ if not existing.get('domain'):
+ module.fail_json(msg='Cannot remove a vtp password '
+ 'before vtp domain is set.')
+
+ elif existing['vtp_password'] != ('\\'):
+ commands.append(['no vtp password'])
+
+ elif state == 'present':
+ if delta:
+ if not existing.get('domain'):
+ module.fail_json(msg='Cannot set vtp password '
+ 'before vtp domain is set.')
+
+ else:
+ commands.append(['vtp password {0}'.format(vtp_password)])
+
+ cmds = flatten_list(commands)
+ if cmds:
+ if module.check_mode:
+ module.exit_json(changed=True, commands=cmds)
+ else:
+ changed = True
+ execute_config_command(cmds, module)
+ end_state = get_vtp_config(module)
+ if 'configure' in cmds:
+ cmds.pop(0)
+
+ results = {}
+ results['proposed'] = proposed
+ results['existing'] = existing
+ results['end_state'] = end_state
+ results['updates'] = cmds
+ results['changed'] = changed
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/nxos/nxos_vtp_version.py b/network/nxos/nxos_vtp_version.py
new file mode 100644
index 00000000000..bd10745ba86
--- /dev/null
+++ b/network/nxos/nxos_vtp_version.py
@@ -0,0 +1,418 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+
+module: nxos_vtp_version
+version_added: "2.2"
+short_description: Manages VTP version configuration.
+description:
+ - Manages VTP version configuration.
+extends_documentation_fragment: nxos
+author:
+ - Gabriele Gerbino (@GGabriele)
+notes:
+ - VTP feature must be active on the device to use this module.
+ - This module is used to manage only VTP version.
+ - Use this in combination with M(nxos_vtp_password) and M(nxos_vtp_domain)
+ to fully manage VTP operations.
+options:
+ version:
+ description:
+ - VTP version number.
+ required: true
+ choices: ['1', '2']
+'''
+EXAMPLES = '''
+# ENSURE VTP VERSION IS 2
+- nxos_vtp_version:
+ version: 2
+ host: "{{ inventory_hostname }}"
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: always
+ type: dict
+ sample: {"version": "2"}
+existing:
+ description:
+ - k/v pairs of existing vtp
+ type: dict
+ sample: {"domain": "testing", "version": "1", "vtp_password": "\"}
+end_state:
+ description: k/v pairs of vtp after module execution
+ returned: always
+ type: dict
+ sample: {"domain": "testing", "version": "2", "vtp_password": "\"}
+updates:
+ description: command sent to the device
+ returned: always
+ type: list
+ sample: ["vtp version 2"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+import json
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class CustomNetworkConfig(NetworkConfig):
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+# Diff `candidate` against the running config and push the resulting
+# commands (honoring check_mode); optionally copy run->start when the
+# 'save' param is set. Returns dict(changed=..., updates=...) for exit_json.
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ # newer module_utils expose config() instead of configure()
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+
+# Send config-mode commands, supporting both the legacy module.configure()
+# API and the newer module.cli runner; fail_json on shell errors.
+def execute_config_command(commands, module):
+ try:
+ module.configure(commands)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+ except AttributeError:
+ # no configure() — enter config mode explicitly via the cli runner
+ try:
+ commands.insert(0, 'configure')
+ module.cli.add_commands(commands, output='config')
+ module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending CLI commands',
+ error=str(clie), commands=commands)
+
+
def get_cli_body_ssh(command, response, module):
    """Normalize output gathered over transport=cli (ssh).

    This is kind of a hack, needed because these modules were written for
    NX-API and not every command supports "| json" over ssh.  An XML-looking
    (or empty-line) reply means the command was valid but the resource does
    not exist yet; 'status' commands stay raw text; anything else must parse
    as JSON or the module fails.
    """
    first_line = response[0]
    if 'xml' in first_line or first_line == '\n':
        return []
    if 'status' in command:
        return response
    try:
        return [json.loads(first_line)]
    except ValueError:
        module.fail_json(msg='Command does not support JSON output',
                         command=command)
+
+
+# Run show commands via module.execute() (legacy / nxapi path) or the newer
+# module.cli runner, translating the nxapi command_type into the runner's
+# output format ('json'/'text').
+def execute_show(cmds, module, command_type=None):
+ command_type_map = {
+ 'cli_show': 'json',
+ 'cli_show_ascii': 'text'
+ }
+
+ try:
+ if command_type:
+ response = module.execute(cmds, command_type=command_type)
+ else:
+ response = module.execute(cmds)
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ except AttributeError:
+ # module.execute missing — fall back to the cli runner API
+ try:
+ if command_type:
+ command_type = command_type_map.get(command_type)
+ module.cli.add_commands(cmds, output=command_type)
+ response = module.cli.run_commands()
+ else:
+ module.cli.add_commands(cmds, raw=True)
+ response = module.cli.run_commands()
+ except ShellError:
+ clie = get_exception()
+ module.fail_json(msg='Error sending {0}'.format(cmds),
+ error=str(clie))
+ return response
+
+
def execute_show_command(command, module, command_type='cli_show'):
    """Run a show command over the active transport and return the parsed body.

    For transport=cli the command is suffixed with '| json' (unless it is a
    'status' command, which stays raw) and the reply is normalized by
    get_cli_body_ssh(); for transport=nxapi the device returns structured
    output directly.
    """
    # Fix: previously `body` was unbound (NameError at the return) whenever
    # transport was neither 'cli' nor 'nxapi'; default to an empty body.
    body = []
    if module.params['transport'] == 'cli':
        if 'status' not in command:
            command += ' | json'
        cmds = [command]
        response = execute_show(cmds, module)
        body = get_cli_body_ssh(command, response, module)
    elif module.params['transport'] == 'nxapi':
        cmds = [command]
        body = execute_show(cmds, module, command_type=command_type)

    return body
+
+
def flatten_list(command_lists):
    """Flatten *command_lists* by exactly one level.

    List entries are spliced into the result in order; non-list entries are
    appended as-is.
    """
    flattened = []
    for entry in command_lists:
        flattened.extend(entry if isinstance(entry, list) else [entry])
    return flattened
+
+
def get_vtp_config(module):
    """Parse 'show vtp status' (plus the vtp password) into a dict.

    Returns {'domain': ..., 'version': ..., 'vtp_password': ...} when both
    domain and version could be parsed, otherwise an empty dict.
    """
    command = 'show vtp status'

    body = execute_show_command(
        command, module, command_type='cli_show_ascii')[0]
    vtp_parsed = {}

    if body:
        # Fix: the named capture groups had been stripped, leaving invalid
        # '(?P\d)' / '(?P\S+)' syntax that made re.match() raise re.error
        # (not caught by the AttributeError handlers below).
        version_regex = r'.*VTP version running\s+:\s+(?P<version>\d).*'
        domain_regex = r'.*VTP Domain Name\s+:\s+(?P<domain>\S+).*'

        try:
            match_version = re.match(version_regex, body, re.DOTALL)
            version = match_version.groupdict()['version']
        except AttributeError:
            # no match object -> .groupdict() on None
            version = ''

        try:
            match_domain = re.match(domain_regex, body, re.DOTALL)
            domain = match_domain.groupdict()['domain']
        except AttributeError:
            domain = ''

        if domain and version:
            vtp_parsed['domain'] = domain
            vtp_parsed['version'] = version
            vtp_parsed['vtp_password'] = get_vtp_password(module)

    return vtp_parsed
+
+
def get_vtp_password(module):
    """Return the device's VTP password as a string ('' when unset)."""
    response = execute_show_command('show vtp password', module)[0]
    secret = response['passwd']
    return str(secret) if secret else ""
+
+
+# Entry point for nxos_vtp_version: ensure the device runs the requested
+# VTP version; reports proposed/existing/end_state/updates/changed.
+def main():
+ argument_spec = dict(
+ version=dict(type='str', choices=['1', '2'], required=True),
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ version = module.params['version']
+
+ existing = get_vtp_config(module)
+ end_state = existing
+
+ args = dict(version=version)
+
+ changed = False
+ # Python 2 iteritems(); consistent with the rest of this module
+ proposed = dict((k, v) for k, v in args.iteritems() if v is not None)
+ delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
+
+ commands = []
+ if delta:
+ commands.append(['vtp version {0}'.format(version)])
+
+ cmds = flatten_list(commands)
+ if cmds:
+ if module.check_mode:
+ module.exit_json(changed=True, commands=cmds)
+ else:
+ changed = True
+ execute_config_command(cmds, module)
+ end_state = get_vtp_config(module)
+ # 'configure' may have been prepended by execute_config_command
+ if 'configure' in cmds:
+ cmds.pop(0)
+
+ results = {}
+ results['proposed'] = proposed
+ results['existing'] = existing
+ results['end_state'] = end_state
+ results['updates'] = cmds
+ results['changed'] = changed
+
+ module.exit_json(**results)
diff --git a/network/nxos/nxos_vxlan_vtep.py b/network/nxos/nxos_vxlan_vtep.py
new file mode 100644
index 00000000000..6d29597cd28
--- /dev/null
+++ b/network/nxos/nxos_vxlan_vtep.py
@@ -0,0 +1,530 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_vxlan_vtep
+version_added: "2.2"
+short_description: Manages VXLAN Network Virtualization Endpoint (NVE).
+description:
+ - Manages VXLAN Network Virtualization Endpoint (NVE) overlay interface
+ that terminates VXLAN tunnels.
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - The module is used to manage NVE properties, not to create NVE
+ interfaces. Use M(nxos_interface) if you wish to do so.
+ - C(state=absent) removes the interface.
+ - Default, where supported, restores params default value.
+options:
+ interface:
+ description:
+ - Interface name for the VXLAN Network Virtualization Endpoint.
+ required: true
+ description:
+ description:
+ - Description of the NVE interface.
+ required: false
+ default: null
+ host_reachability:
+ description:
+ - Specify mechanism for host reachability advertisement.
+ required: false
+ choices: ['true', 'false']
+ default: null
+ shutdown:
+ description:
+ - Administratively shutdown the NVE interface.
+ required: false
+ choices: ['true','false']
+ default: false
+ source_interface:
+ description:
+ - Specify the loopback interface whose IP address should be
+ used for the NVE interface.
+ required: false
+ default: null
+ source_interface_hold_down_time:
+ description:
+ - Suppresses advertisement of the NVE loopback address until
+ the overlay has converged.
+ required: false
+ default: null
+ state:
+ description:
+ - Determines whether the config should be present or not
+ on the device.
+ required: false
+ default: present
+ choices: ['present','absent']
+'''
+EXAMPLES = '''
+- nxos_vxlan_vtep:
+ interface: nve1
+ description: default
+ host_reachability: default
+ source_interface: Loopback0
+ source_interface_hold_down_time: 30
+ shutdown: default
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"description": "simple description", "host_reachability": true,
+ "interface": "nve1", "shutdown": true, "source_interface": "loopback0",
+ "source_interface_hold_down_time": "30"}
+existing:
+ description: k/v pairs of existing VXLAN VTEP configuration
+ returned: verbose mode
+ type: dict
+ sample: {}
+end_state:
+ description: k/v pairs of VXLAN VTEP configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"description": "simple description", "host_reachability": true,
+ "interface": "nve1", "shutdown": true, "source_interface": "loopback0",
+ "source_interface_hold_down_time": "30"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["interface nve1", "source-interface loopback0",
+ "source-interface hold-down-time 30", "description simple description",
+ "shutdown", "host-reachability protocol bgp"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
def to_list(val):
    """Coerce *val* into a list.

    Sequences (list/tuple) are copied, None becomes [], and any other
    scalar is wrapped in a single-element list.
    """
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
+
+
+# NetworkConfig subclass shared by the migrated nxos modules: adds section
+# expansion/lookup helpers plus an add() that creates missing parents.
+class CustomNetworkConfig(NetworkConfig):
+
+ # Depth-first expansion of a config object and all of its children.
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ # Find the ConfigLine whose text and full parent chain match `path`.
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ # Render a list of ConfigLine objects back into raw text.
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ # Raw text of the section at `path`; NOTE(review): returns [] on a miss
+ # but a string on a hit — callers must tolerate both types.
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ # Resolve `path` to ConfigLine objects; ValueError when not present.
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ # Add one or more config lines under the (possibly missing) parent path;
+ # parents are created as needed, duplicates are skipped.
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+# Prefer the legacy get_module() factory; the NameError fallback covers the
+# case where the try-import above bound NetworkModule instead.
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+# Running-config as CustomNetworkConfig; explicit 'config' param wins,
+# otherwise fetched via the old or new module_utils API.
+# NOTE(review): include_defaults argument is ignored in favor of the param.
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+# Diff `candidate` against the running config, push commands (honoring
+# check_mode), optionally save run->start; returns dict(changed, updates).
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ # newer module_utils expose config() instead of configure()
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+# Module params that are plain presence/absence flags on the device.
+BOOL_PARAMS = [
+ 'shutdown',
+ 'host_reachability'
+]
+# Maps module param names to the NX-OS CLI keywords they drive.
+PARAM_TO_COMMAND_KEYMAP = {
+ 'description': 'description',
+ 'host_reachability': 'host-reachability protocol bgp',
+ 'interface': 'interface',
+ 'shutdown': 'shutdown',
+ 'source_interface': 'source-interface',
+ 'source_interface_hold_down_time': 'source-interface hold-down-time'
+}
+# Device-side defaults applied when a param is passed as 'default'.
+PARAM_TO_DEFAULT_KEYMAP = {
+ 'description': False,
+ 'shutdown': True,
+}
+
+# Accumulates non-fatal warnings surfaced in the module result.
+WARNINGS = []
+
+
def invoke(name, *args, **kwargs):
    """Dispatch helper: call the module-level callable *name* if defined.

    Returns the callee's result, or None when no (truthy) global by that
    name exists.
    """
    target = globals().get(name)
    return target(*args, **kwargs) if target else None
+
+
def get_value(arg, config, module):
    """Extract the current value of *arg* from the nve interface config text.

    Booleans are detected by presence of their CLI keyword (with special
    'no shutdown' handling); other params capture the text following the
    keyword. Returns False/'' when the param is not configured.
    """
    if arg in BOOL_PARAMS:
        REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        NO_SHUT_REGEX = re.compile(r'\s+no shutdown\s*$', re.M)
        value = False
        if arg == 'shutdown':
            try:
                # explicit 'no shutdown' wins over a bare 'shutdown' match
                if NO_SHUT_REGEX.search(config):
                    value = False
                elif REGEX.search(config):
                    value = True
            except TypeError:
                # config was not a string
                value = False
        else:
            try:
                if REGEX.search(config):
                    value = True
            except TypeError:
                value = False
    else:
        # Fix: restored the named capture groups — the migrated '(?P.*)' /
        # '(?P\S+)' forms are invalid regex syntax and made re.compile raise.
        REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        NO_DESC_REGEX = re.compile(r'\s+no description\s*$', re.M)
        SOURCE_INTF_REGEX = re.compile(r'(?:{0}\s)(?P<value>\S+)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = ''
        if arg == 'description':
            if NO_DESC_REGEX.search(config):
                value = ''
            elif PARAM_TO_COMMAND_KEYMAP[arg] in config:
                value = REGEX.search(config).group('value').strip()
        elif arg == 'source_interface':
            for line in config.splitlines():
                try:
                    if PARAM_TO_COMMAND_KEYMAP[arg] in config:
                        value = SOURCE_INTF_REGEX.search(config).group('value').strip()
                        break
                except AttributeError:
                    value = ''
        else:
            if PARAM_TO_COMMAND_KEYMAP[arg] in config:
                value = REGEX.search(config).group('value').strip()
    return value
+
+
+# Read the current NVE interface section from the running config and return
+# a param->value dict; {} when the interface is absent entirely.
+def get_existing(module, args):
+ existing = {}
+ netcfg = get_config(module)
+
+ interface_string = 'interface {0}'.format(module.params['interface'].lower())
+ parents = [interface_string]
+ config = netcfg.get_section(parents)
+
+ if config:
+ for arg in args:
+ existing[arg] = get_value(arg, config, module)
+
+ existing['interface'] = module.params['interface'].lower()
+ else:
+ # interface line exists but has no sub-commands under it
+ if interface_string in str(netcfg):
+ existing['interface'] = module.params['interface'].lower()
+ for arg in args:
+ existing[arg] = ''
+ return existing
+
+
def apply_key_map(key_map, table):
    """Rebuild *table* with its keys translated through *key_map*.

    Entries whose key has no (truthy) mapping are dropped; values are
    carried over unchanged.
    """
    return dict(
        (key_map[key], table[key])
        for key in table
        if key_map.get(key)
    )
+
+
def fix_commands(commands, module):
    """Reorder *commands* so dependencies are satisfied on the device.

    'source-interface' must be applied first and 'no source-interface'
    last; 'source-interface hold-down-time' lines (and their 'no ' forms)
    keep their original position. The list is reordered in place and
    returned.
    """
    set_cmd = ''
    unset_cmd = ''

    for candidate in commands:
        # substring also matches the 'no ...' hold-down-time variant
        if 'source-interface hold-down-time' in candidate:
            continue
        if 'no source-interface' in candidate:
            unset_cmd = candidate
        elif 'source-interface' in candidate:
            set_cmd = candidate

    if set_cmd:
        commands.remove(set_cmd)
        commands.insert(0, set_cmd)

    if unset_cmd:
        commands.remove(unset_cmd)
        commands.append(unset_cmd)
    return commands
+
+
+# Build the commands moving the device from `existing` to `proposed` and
+# stage them on `candidate` under the nve interface section.
+def state_present(module, existing, proposed, candidate):
+ commands = list()
+ proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
+ existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
+ for key, value in proposed_commands.iteritems():
+ if value is True:
+ commands.append(key)
+
+ elif value is False:
+ commands.append('no {0}'.format(key))
+
+ elif value == 'default':
+ if existing_commands.get(key):
+ existing_value = existing_commands.get(key)
+ commands.append('no {0} {1}'.format(key, existing_value))
+ else:
+ if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
+ commands.append('no {0}'.format(key.lower()))
+ # NOTE(review): exit_json here aborts command generation
+ # after the first defaulted boolean — looks unintentional;
+ # confirm before relying on this behavior.
+ module.exit_json(commands=commands)
+ else:
+ command = '{0} {1}'.format(key, value.lower())
+ commands.append(command)
+
+ if commands:
+ commands = fix_commands(commands, module)
+ parents = ['interface {0}'.format(module.params['interface'].lower())]
+ candidate.add(commands, parents=parents)
+ else:
+ # nothing to change, but ensure the interface line itself exists
+ if not existing and module.params['interface']:
+ commands = ['interface {0}'.format(module.params['interface'].lower())]
+ candidate.add(commands, parents=[])
+
+
+# state=absent removes the whole NVE interface (see module notes).
+def state_absent(module, existing, proposed, candidate):
+ commands = ['no interface {0}'.format(module.params['interface'].lower())]
+ candidate.add(commands, parents=[])
+
+
+# Entry point for nxos_vxlan_vtep: normalize params, compute the proposed
+# state, stage commands via state_present/state_absent and apply them.
+def main():
+ argument_spec = dict(
+ interface=dict(required=True, type='str'),
+ description=dict(required=False, type='str'),
+ host_reachability=dict(required=False, type='bool'),
+ shutdown=dict(required=False, type='bool'),
+ source_interface=dict(required=False, type='str'),
+ source_interface_hold_down_time=dict(required=False, type='str'),
+ m_facts=dict(required=False, default=False, type='bool'),
+ state=dict(choices=['present', 'absent'], default='present',
+ required=False),
+ include_defaults=dict(default=True),
+ config=dict(),
+ save=dict(type='bool', default=False)
+ )
+ module = get_network_module(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ state = module.params['state']
+ interface = module.params['interface'].lower()
+
+ args = [
+ 'interface',
+ 'description',
+ 'host_reachability',
+ 'shutdown',
+ 'source_interface',
+ 'source_interface_hold_down_time'
+ ]
+
+ existing = invoke('get_existing', module, args)
+ end_state = existing
+ proposed_args = dict((k, v) for k, v in module.params.iteritems()
+ if v is not None and k in args)
+
+ # translate 'true'/'false'/'default' strings into their effective values
+ proposed = {}
+ for key, value in proposed_args.iteritems():
+ if key != 'interface':
+ if str(value).lower() == 'true':
+ value = True
+ elif str(value).lower() == 'false':
+ value = False
+ elif str(value).lower() == 'default':
+ value = PARAM_TO_DEFAULT_KEYMAP.get(key)
+ if value is None:
+ if key in BOOL_PARAMS:
+ value = False
+ else:
+ value = 'default'
+ if existing.get(key) or (not existing.get(key) and value):
+ proposed[key] = value
+
+ result = {}
+ if state == 'present' or (state == 'absent' and existing):
+ if not existing:
+ WARNINGS.append("The proposed NVE interface did not exist. "
+ "It's recommended to use nxos_interface to create "
+ "all logical interfaces.")
+ candidate = CustomNetworkConfig(indent=3)
+ invoke('state_%s' % state, module, existing, proposed, candidate)
+
+ try:
+ response = load_config(module, candidate)
+ result.update(response)
+ except ShellError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc))
+ else:
+ result['updates'] = []
+
+ result['connected'] = module.connected
+ # verbose runs also report existing/end_state/proposed
+ if module._verbosity > 0:
+ end_state = invoke('get_existing', module, args)
+ result['end_state'] = end_state
+ result['existing'] = existing
+ result['proposed'] = proposed_args
+
+ if WARNINGS:
+ result['warnings'] = WARNINGS
+
+ module.exit_json(**result)
diff --git a/network/nxos/nxos_vxlan_vtep_vni.py b/network/nxos/nxos_vxlan_vtep_vni.py
new file mode 100644
index 00000000000..cf354d59c4f
--- /dev/null
+++ b/network/nxos/nxos_vxlan_vtep_vni.py
@@ -0,0 +1,589 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: nxos_vxlan_vtep_vni
+version_added: "2.2"
+short_description: Creates a Virtual Network Identifier member (VNI)
+description:
+ - Creates a Virtual Network Identifier member (VNI) for an NVE
+ overlay interface.
+author: Gabriele Gerbino (@GGabriele)
+extends_documentation_fragment: nxos
+notes:
+ - default, where supported, restores params default value.
+options:
+ interface:
+ description:
+ - Interface name for the VXLAN Network Virtualization Endpoint.
+ required: true
+ vni:
+ description:
+ - ID of the Virtual Network Identifier.
+ required: true
+ assoc_vrf:
+ description:
+ - This attribute is used to identify and separate processing VNIs
+ that are associated with a VRF and used for routing. The VRF
+ and VNI specified with this command must match the configuration
+ of the VNI under the VRF.
+ required: false
+ choices: ['true','false']
+ default: null
+ ingress_replication:
+ description:
+ - Specifies mechanism for host reachability advertisement.
+ required: false
+ choices: ['bgp','static']
+ default: null
+ multicast_group:
+ description:
+ - The multicast group (range) of the VNI. Valid values are
+ string and keyword 'default'.
+ required: false
+ default: null
+ peer_list:
+ description:
+ - Set the ingress-replication static peer list. Valid values
+ are an array, a space-separated string of ip addresses,
+ or the keyword 'default'.
+ required: false
+ default: null
+ suppress_arp:
+ description:
+ - Suppress arp under layer 2 VNI.
+ required: false
+ choices: ['true','false']
+ default: null
+ state:
+ description:
+ - Determines whether the config should be present or not
+ on the device.
+ required: false
+ default: present
+ choices: ['present','absent']
+ include_defaults:
+ description:
+ - Specify to use or not the complete running configuration
+ for module operations.
+ required: false
+ default: true
+ choices: ['true','false']
+ config:
+ description:
+ - Configuration string to be used for module operations. If not
+ specified, the module will use the current running configuration.
+ required: false
+ default: null
+ save:
+ description:
+ - Specify to save the running configuration after
+ module operations.
+ required: false
+ default: false
+ choices: ['true','false']
+'''
+EXAMPLES = '''
+- nxos_vxlan_vtep_vni:
+ interface: nve1
+ vni: 6000
+ ingress_replication: default
+ username: "{{ un }}"
+ password: "{{ pwd }}"
+ host: "{{ inventory_hostname }}"
+'''
+
+RETURN = '''
+proposed:
+ description: k/v pairs of parameters passed into module
+ returned: verbose mode
+ type: dict
+ sample: {"ingress_replication": "default", "interface": "nve1", "vni": "6000"}
+existing:
+ description: k/v pairs of existing configuration
+ returned: verbose mode
+ type: dict
+ sample: {}
+end_state:
+ description: k/v pairs of configuration after module execution
+ returned: verbose mode
+ type: dict
+ sample: {"assoc_vrf": false, "ingress_replication": "", "interface": "nve1",
+ "multicast_group": "", "peer_list": [],
+ "suppress_arp": false, "vni": "6000"}
+updates:
+ description: commands sent to the device
+ returned: always
+ type: list
+ sample: ["interface nve1", "member vni 6000"]
+changed:
+ description: check to see if a change was made on the device
+ returned: always
+ type: boolean
+ sample: true
+'''
+
+# COMMON CODE FOR MIGRATION
+import re
+
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
+from ansible.module_utils.shell import ShellError
+
+try:
+ from ansible.module_utils.nxos import get_module
+except ImportError:
+ from ansible.module_utils.nxos import NetworkModule
+
+
+# Normalize a scalar/tuple/None into a list (shared migration helper).
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+# NetworkConfig subclass shared by the migrated nxos modules: section
+# expansion/lookup helpers plus an add() that creates missing parents.
+class CustomNetworkConfig(NetworkConfig):
+
+ # Depth-first expansion of a config object and all of its children.
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.children:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ # Find the ConfigLine whose text and full parent chain match `path`.
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ parents = [p.text for p in item.parents]
+ if parents == path[:-1]:
+ return item
+
+ # Render a list of ConfigLine objects back into raw text.
+ def to_block(self, section):
+ return '\n'.join([item.raw for item in section])
+
+ # Raw text of the section at `path`; NOTE(review): returns [] on a miss
+ # but a string on a hit — callers must tolerate both types.
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ # Resolve `path` to ConfigLine objects; ValueError when not present.
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError('path does not exist in config')
+ return self.expand_section(obj)
+
+
+ # Add config lines under the (possibly missing) parent path; parents are
+ # created as needed, duplicate children are skipped.
+ def add(self, lines, parents=None):
+ """Adds one or lines of configuration
+ """
+
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ ## global config command
+ if not parents:
+ for line in to_list(lines):
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_section_objects(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self.indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj.parents = list(ancestors)
+ ancestors[-1].children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in to_list(lines):
+ # check if child already exists
+ for child in ancestors[-1].children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self.indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item.parents = ancestors
+ ancestors[-1].children.append(item)
+ self.items.append(item)
+
+
+# Prefer the legacy get_module() factory; NameError falls back to the
+# newer NetworkModule bound by the try-import above.
+def get_network_module(**kwargs):
+ try:
+ return get_module(**kwargs)
+ except NameError:
+ return NetworkModule(**kwargs)
+
+# Running-config as CustomNetworkConfig; explicit 'config' param wins,
+# otherwise fetched via the old or new module_utils API.
+# NOTE(review): include_defaults argument is ignored in favor of the param.
+def get_config(module, include_defaults=False):
+ config = module.params['config']
+ if not config:
+ try:
+ config = module.get_config()
+ except AttributeError:
+ defaults = module.params['include_defaults']
+ config = module.config.get_config(include_defaults=defaults)
+ return CustomNetworkConfig(indent=2, contents=config)
+
+# Diff `candidate` against the running config, push commands (honoring
+# check_mode), optionally save run->start; returns dict(changed, updates).
+def load_config(module, candidate):
+ config = get_config(module)
+
+ commands = candidate.difference(config)
+ commands = [str(c).strip() for c in commands]
+
+ save_config = module.params['save']
+
+ result = dict(changed=False)
+
+ if commands:
+ if not module.check_mode:
+ try:
+ module.configure(commands)
+ except AttributeError:
+ # newer module_utils expose config() instead of configure()
+ module.config(commands)
+
+ if save_config:
+ try:
+ module.config.save_config()
+ except AttributeError:
+ module.execute(['copy running-config startup-config'])
+
+ result['changed'] = True
+ result['updates'] = commands
+
+ return result
+# END OF COMMON CODE
+
+# Params that are plain presence/absence flags on the device.
+BOOL_PARAMS = ['suppress_arp']
+# Maps module param names to the NX-OS CLI keywords they drive.
+PARAM_TO_COMMAND_KEYMAP = {
+ 'assoc_vrf': 'associate-vrf',
+ 'interface': 'interface',
+ 'vni': 'member vni',
+ 'ingress_replication': 'ingress-replication protocol',
+ 'multicast_group': 'mcast-group',
+ 'peer_list': 'peer-ip',
+ 'suppress_arp': 'suppress-arp'
+}
+# No device-side defaults are modelled for this module.
+PARAM_TO_DEFAULT_KEYMAP = {}
+# Accumulates non-fatal warnings surfaced in the module result.
+WARNINGS = []
+
+# Dispatch helper: call the module-level function `name` when it exists;
+# implicitly returns None otherwise.
+def invoke(name, *args, **kwargs):
+ func = globals().get(name)
+ if func:
+ return func(*args, **kwargs)
+
+
def get_value(arg, config, module):
    """Read the current value of *arg* out of the member-vni config text.

    Boolean params report presence of their CLI keyword; other params
    capture the text following the keyword ('' when absent).
    """
    if arg in BOOL_PARAMS:
        REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = False
        try:
            if REGEX.search(config):
                value = True
        except TypeError:
            # config was not a string
            value = False
    else:
        # Fix: restored the named capture group — the migrated '(?P.*)'
        # form is invalid regex syntax and made re.compile() raise.
        REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = ''
        if PARAM_TO_COMMAND_KEYMAP[arg] in config:
            value = REGEX.search(config).group('value')
    return value
+
+
def check_interface(module, netcfg):
    """Return the name of the first 'interface nveX' in the running config.

    Returns e.g. 'nve1', or '' when no NVE interface is configured.
    """
    config = str(netcfg)

    # Fix: restored the named capture group — '(?P.*)' is invalid regex
    # syntax and made re.compile() raise at runtime.
    REGEX = re.compile(r'(?:interface nve)(?P<value>.*)$', re.M)
    value = ''
    if 'interface nve' in config:
        value = 'nve{0}'.format(REGEX.search(config).group('value'))

    return value
+
+
def get_custom_value(arg, config, module):
    """Extract values that need more than the generic regex in get_value().

    'assoc_vrf' -> bool (presence of 'associate-vrf'); 'peer_list' -> list
    of static ingress-replication peer IPs. Returns None for other args
    (previously this fell through to an UnboundLocalError).
    """
    splitted_config = config.splitlines()
    value = None
    if arg == 'assoc_vrf':
        value = 'associate-vrf' in config
    elif arg == 'peer_list':
        value = []
        # Fix: restored the named capture group — '(?P.*)' is invalid
        # regex syntax and made re.compile() raise at runtime.
        REGEX = re.compile(r'(?:peer-ip\s)(?P<peer_value>.*)$', re.M)
        for line in splitted_config:
            peer_value = ''
            if PARAM_TO_COMMAND_KEYMAP[arg] in line:
                peer_value = REGEX.search(line).group('peer_value')
            if peer_value:
                value.append(peer_value)
    return value
+
+
+# Locate the nve interface and the requested member-vni section, then pull
+# each arg's current value; returns (existing_dict, interface_name).
+def get_existing(module, args):
+ existing = {}
+ netcfg = get_config(module)
+
+ # args whose parsing is handled by get_custom_value()
+ custom = [
+ 'assoc_vrf',
+ 'peer_list'
+ ]
+
+ interface_exist = check_interface(module, netcfg)
+ if interface_exist:
+ parents = ['interface {0}'.format(interface_exist)]
+ temp_config = netcfg.get_section(parents)
+
+ if 'associate-vrf' in temp_config:
+ parents.append('member vni {0} associate-vrf'.format(
+ module.params['vni']))
+ config = netcfg.get_section(parents)
+ elif 'member vni' in temp_config:
+ parents.append('member vni {0}'.format(module.params['vni']))
+ config = netcfg.get_section(parents)
+ else:
+ config = {}
+
+ if config:
+ for arg in args:
+ if arg not in ['interface', 'vni']:
+ if arg in custom:
+ existing[arg] = get_custom_value(arg, config, module)
+ else:
+ existing[arg] = get_value(arg, config, module)
+ existing['interface'] = interface_exist
+ existing['vni'] = module.params['vni']
+
+ return existing, interface_exist
+
+
+# Translate table keys through key_map; keys without a (truthy) mapping
+# are dropped, values are carried over unchanged.
+def apply_key_map(key_map, table):
+ new_dict = {}
+ for key, value in table.items():
+ new_key = key_map.get(key)
+ if new_key:
+ value = table.get(key)
+ # both branches store the value; kept as-is from the migration
+ if value:
+ new_dict[new_key] = value
+ else:
+ new_dict[new_key] = value
+ return new_dict
+
+
+# Build the commands moving the device from `existing` to `proposed` and
+# stage them on `candidate` under the right interface/vni section.
+def state_present(module, existing, proposed, candidate):
+ commands = list()
+ proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
+ existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
+
+ for key, value in proposed_commands.iteritems():
+ if key == 'associate-vrf':
+ command = 'member vni {0} {1}'.format(module.params['vni'], key)
+
+ if value:
+ commands.append(command)
+ else:
+ commands.append('no {0}'.format(command))
+
+ elif key == 'peer-ip' and value != 'default':
+ # one 'peer-ip <addr>' line per static peer
+ for peer in value:
+ commands.append('{0} {1}'.format(key, peer))
+
+ elif value is True:
+ commands.append(key)
+
+ elif value is False:
+ commands.append('no {0}'.format(key))
+
+ elif value == 'default':
+ if existing_commands.get(key):
+ existing_value = existing_commands.get(key)
+ if key == 'peer-ip':
+ for peer in existing_value:
+ commands.append('no {0} {1}'.format(key, peer))
+ else:
+ commands.append('no {0} {1}'.format(key, existing_value))
+ else:
+ if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
+ commands.append('no {0}'.format(key.lower()))
+ else:
+ command = '{0} {1}'.format(key, value.lower())
+ commands.append(command)
+
+ if commands:
+ vni_command = 'member vni {0}'.format(module.params['vni'])
+ ingress_replication_command = 'ingress-replication protocol static'
+ interface_command = 'interface {0}'.format(module.params['interface'])
+
+ if ingress_replication_command in commands:
+ # peer-ip lines nest under 'ingress-replication protocol static'
+ static_level_cmds = [cmd for cmd in commands if 'peer' in cmd]
+ parents = [interface_command, vni_command, ingress_replication_command]
+ candidate.add(static_level_cmds, parents=parents)
+ commands = [cmd for cmd in commands if 'peer' not in cmd]
+
+ if vni_command in commands:
+ parents = [interface_command]
+ commands.remove(vni_command)
+ # without assoc_vrf, the remaining commands nest under member vni
+ if module.params['assoc_vrf'] is None:
+ parents.append(vni_command)
+ candidate.add(commands, parents=parents)
+
+
def state_absent(module, existing, proposed, candidate):
    """Stage the command that removes the VNI member from the NVE interface.

    If the VNI is currently associated with a VRF, the more specific
    ``associate-vrf`` form of the removal command is used.
    """
    vni = module.params['vni']
    if existing['assoc_vrf']:
        removal = 'no member vni {0} associate-vrf'.format(vni)
    else:
        removal = 'no member vni {0}'.format(vni)
    interface_parent = 'interface {0}'.format(module.params['interface'])
    candidate.add([removal], parents=[interface_parent])
+
+
def main():
    """Module entry point: validate parameters, compute the config delta
    between existing and proposed state, and push it to the device.

    Python 3 compatibility fixes applied here: ``dict.items()`` replaces
    the Python 2-only ``iteritems()``, and the result of ``map()`` is
    materialized with ``list()`` so a real list (not a lazy iterator) is
    stored back into ``module.params``.
    """
    argument_spec = dict(
        interface=dict(required=True, type='str'),
        vni=dict(required=True, type='str'),
        assoc_vrf=dict(required=False, type='bool'),
        multicast_group=dict(required=False, type='str'),
        peer_list=dict(required=False, type='list'),
        suppress_arp=dict(required=False, type='bool'),
        ingress_replication=dict(required=False, type='str',
                                 choices=['bgp', 'static', 'default']),
        state=dict(choices=['present', 'absent'], default='present',
                   required=False),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    # assoc_vrf is mutually exclusive with the per-VNI replication options.
    if module.params['assoc_vrf']:
        mutually_exclusive_params = ['multicast_group',
                                     'suppress_arp',
                                     'ingress_replication']
        for param in mutually_exclusive_params:
            if module.params[param]:
                module.fail_json(msg='assoc_vrf cannot be used with '
                                     '{0} param'.format(param))
    if module.params['peer_list']:
        if module.params['ingress_replication'] != 'static':
            module.fail_json(msg='ingress_replication=static is required '
                                 'when using peer_list param')
        else:
            peer_list = module.params['peer_list']
            if peer_list[0] == 'default':
                module.params['peer_list'] = 'default'
            else:
                # list() is required on Python 3 where map() is lazy.
                stripped_peer_list = list(map(str.strip, peer_list))
                module.params['peer_list'] = stripped_peer_list

    state = module.params['state']
    args = [
        'assoc_vrf',
        'interface',
        'vni',
        'ingress_replication',
        'multicast_group',
        'peer_list',
        'suppress_arp'
    ]

    existing, interface_exist = invoke('get_existing', module, args)
    end_state = existing
    proposed_args = dict((k, v) for k, v in module.params.items()
                         if v is not None and k in args)

    # Normalize 'default' requests to the device defaults; the interface
    # name itself is never part of the proposed diff.
    proposed = {}
    for key, value in proposed_args.items():
        if key != 'interface':
            if str(value).lower() == 'default':
                value = PARAM_TO_DEFAULT_KEYMAP.get(key)
                if value is None:
                    value = 'default'
            if existing.get(key) or (not existing.get(key) and value):
                proposed[key] = value

    result = {}
    if state == 'present' or (state == 'absent' and existing):
        if not interface_exist:
            WARNINGS.append("The proposed NVE interface does not exist. "
                            "Use nxos_interface to create it first.")
        elif interface_exist != module.params['interface']:
            module.fail_json(msg='Only 1 NVE interface is allowed on '
                                 'the switch.')
        elif (existing and state == 'absent' and
                existing['vni'] != module.params['vni']):
            module.fail_json(msg="ERROR: VNI delete failed: Could not find"
                                 " vni node for {0}".format(
                                     module.params['vni']),
                             existing_vni=existing['vni'])
        else:
            candidate = CustomNetworkConfig(indent=3)
            invoke('state_%s' % state, module, existing, proposed, candidate)

            try:
                response = load_config(module, candidate)
                result.update(response)
            except ShellError:
                exc = get_exception()
                module.fail_json(msg=str(exc))
    else:
        result['updates'] = []

    result['connected'] = module.connected
    if module._verbosity > 0:
        # Re-read device state so end_state reflects the applied change.
        end_state, interface_exist = invoke('get_existing', module, args)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed_args

    if WARNINGS:
        result['warnings'] = WARNINGS

    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/network/openswitch/ops_template.py b/network/openswitch/_ops_template.py
similarity index 73%
rename from network/openswitch/ops_template.py
rename to network/openswitch/_ops_template.py
index 644e103e2ee..d3cb0e00d10 100644
--- a/network/openswitch/ops_template.py
+++ b/network/openswitch/_ops_template.py
@@ -15,6 +15,11 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+
DOCUMENTATION = """
---
module: ops_template
@@ -28,6 +33,7 @@
against a provided candidate configuration. If there are changes, the
candidate configuration is merged with the current configuration and
pushed into OpenSwitch
+deprecated: Deprecated in 2.2. Use ops_config instead
extends_documentation_fragment: openswitch
options:
src:
@@ -86,51 +92,26 @@
RETURN = """
updates:
description: The list of configuration updates to be merged
- retured: always
+ returned: always
type: dict
sample: {obj, obj}
responses:
- desription: returns the responses when configuring using cli
+ description: returns the responses when configuring using cli
returned: when transport == cli
type: list
sample: [...]
"""
-import copy
-
-
-def compare(this, other):
- parents = [item.text for item in this.parents]
- for entry in other:
- if this == entry:
- return None
- return this
-
-
-def expand(obj, queue):
- block = [item.raw for item in obj.parents]
- block.append(obj.raw)
-
- current_level = queue
- for b in block:
- if b not in current_level:
- current_level[b] = collections.OrderedDict()
- current_level = current_level[b]
- for c in obj.children:
- if c.raw not in current_level:
- current_level[c.raw] = collections.OrderedDict()
-
-def flatten(data, obj):
- for k, v in data.items():
- obj.append(k)
- flatten(v, obj)
- return obj
+import ansible.module_utils.openswitch
+from ansible.module_utils.netcfg import NetworkConfig, dumps
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.openswitch import HAS_OPS
def get_config(module):
config = module.params['config'] or dict()
if not config and not module.params['force']:
- config = module.config
+ config = module.config.get_config()
return config
@@ -170,7 +151,6 @@ def merge(changeset, config=None):
current_level[key] = value
return config
-
def main():
""" main entry point for module execution
"""
@@ -184,18 +164,25 @@ def main():
mutually_exclusive = [('config', 'backup'), ('config', 'force')]
- module = get_module(argument_spec=argument_spec,
- mutually_exclusive=mutually_exclusive,
- supports_check_mode=True)
+ module = NetworkModule(argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True)
+
+ if not module.params['transport'] and not HAS_OPS:
+ module.fail_json(msg='unable to import ops.dc library')
result = dict(changed=False)
contents = get_config(module)
- result['_backup'] = copy.deepcopy(module.config)
+ result['_backup'] = contents
if module.params['transport'] in ['ssh', 'rest']:
config = contents
- src = module.from_json(module.params['src'])
+
+ try:
+ src = module.from_json(module.params['src'])
+ except ValueError:
+ module.fail_json(msg='unable to load src due to json parsing error')
changeset = diff(src, config)
candidate = merge(changeset, config)
@@ -208,45 +195,32 @@ def main():
if changeset:
if not module.check_mode:
- module.configure(config)
+ module.config(config)
result['changed'] = True
else:
- config = module.parse_config(config)
- candidate = module.parse_config(module.params['src'])
-
- commands = collections.OrderedDict()
- toplevel = [c.text for c in config]
+ candidate = NetworkConfig(contents=module.params['src'], indent=4)
- for line in candidate:
- if line.text in ['!', '']:
- continue
+ if contents:
+ config = NetworkConfig(contents=contents, indent=4)
- if not line.parents:
- if line.text not in toplevel:
- expand(line, commands)
- else:
- item = compare(line, config)
- if item:
- expand(item, commands)
-
- commands = flatten(commands, list())
+ if not module.params['force']:
+ commands = candidate.difference(config)
+ commands = dumps(commands, 'commands').split('\n')
+ commands = [str(c) for c in commands if c]
+ else:
+ commands = str(candidate).split('\n')
if commands:
if not module.check_mode:
- commands = [str(c).strip() for c in commands]
- response = module.configure(commands)
+ response = module.config(commands)
result['responses'] = response
result['changed'] = True
+
result['updates'] = commands
module.exit_json(**result)
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.netcfg import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.openswitch import *
if __name__ == '__main__':
main()
diff --git a/network/openswitch/ops_command.py b/network/openswitch/ops_command.py
index 27a668ff203..0f1ffd04a1d 100644
--- a/network/openswitch/ops_command.py
+++ b/network/openswitch/ops_command.py
@@ -16,11 +16,15 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: ops_command
version_added: "2.1"
-author: "Peter sprygada (@privateip)"
+author: "Peter Sprygada (@privateip)"
short_description: Run arbitrary commands on OpenSwitch devices.
description:
- Sends arbitrary commands to an OpenSwitch node and returns the results
@@ -33,25 +37,39 @@
description:
- List of commands to send to the remote ops device over the
configured provider. The resulting output from the command
- is returned. If the I(waitfor) argument is provided, the
+ is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
     the number of retries has expired.
required: true
- waitfor:
+ wait_for:
description:
- List of conditions to evaluate against the output of the
- command. The task will wait for a each condition to be true
+ command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
- within the configured number of I(retries), the task fails.
+ within the configured number of retries, the task fails.
See examples.
required: false
default: null
+ aliases: ['waitfor']
+ version_added: "2.2"
+ match:
+ description:
+ - The I(match) argument is used in conjunction with the
+ I(wait_for) argument to specify the match policy. Valid
+ values are C(all) or C(any). If the value is set to C(all)
+ then all conditionals in the I(wait_for) must be satisfied. If
+ the value is set to C(any) then only one of the values must be
+ satisfied.
+ required: false
+ default: all
+ choices: ['any', 'all']
+ version_added: "2.2"
retries:
description:
     - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
- I(waitfor) conditions.
+ I(wait_for) conditions.
required: false
default: 10
interval:
@@ -65,21 +83,32 @@
"""
EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: netop
+ password: netop
+ transport: cli
+
- ops_command:
commands:
- show version
- register: output
+ provider: "{{ cli }}"
- ops_command:
commands:
- show version
- waitfor:
+ wait_for:
- "result[0] contains OpenSwitch"
+ provider: "{{ cli }}"
- ops_command:
- commands:
- - show version
- - show interfaces
+ commands:
+ - show version
+ - show interfaces
+ provider: "{{ cli }}"
"""
RETURN = """
@@ -97,71 +126,103 @@
failed_conditions:
description: the conditionals that failed
- retured: failed
+ returned: failed
type: list
sample: ['...', '...']
"""
+import ansible.module_utils.openswitch
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.netcli import CommandRunner
+from ansible.module_utils.netcli import AddCommandError, FailedConditionsError
+from ansible.module_utils.network import NetworkModule, NetworkError
+from ansible.module_utils.six import string_types
-import time
-
+VALID_KEYS = ['command', 'prompt', 'response']
def to_lines(stdout):
for item in stdout:
- if isinstance(item, basestring):
+ if isinstance(item, string_types):
item = str(item).split('\n')
yield item
+def parse_commands(module):
+ for cmd in module.params['commands']:
+ if isinstance(cmd, string_types):
+ cmd = dict(command=cmd, output=None)
+ elif 'command' not in cmd:
+ module.fail_json(msg='command keyword argument is required')
+ elif not set(cmd.keys()).issubset(VALID_KEYS):
+ module.fail_json(msg='unknown keyword specified')
+ yield cmd
def main():
spec = dict(
- commands=dict(type='list'),
- waitfor=dict(type='list'),
+ # { command: , prompt: , response: }
+ commands=dict(type='list', required=True),
+
+ wait_for=dict(type='list', aliases=['waitfor']),
+ match=dict(default='all', choices=['all', 'any']),
+
retries=dict(default=10, type='int'),
- interval=dict(default=1, type='int'),
- transport=dict(default='cli', choices=['cli'])
+ interval=dict(default=1, type='int')
)
- module = get_module(argument_spec=spec,
- supports_check_mode=True)
+ module = NetworkModule(argument_spec=spec,
+ connect_on_load=False,
+ supports_check_mode=True)
- commands = module.params['commands']
+ commands = list(parse_commands(module))
+ conditionals = module.params['wait_for'] or list()
- retries = module.params['retries']
- interval = module.params['interval']
+ warnings = list()
- try:
- queue = set()
- for entry in (module.params['waitfor'] or list()):
- queue.add(Conditional(entry))
- except AttributeError:
- exc = get_exception()
- module.fail_json(msg=exc.message)
+ runner = CommandRunner(module)
- result = dict(changed=False)
+ for cmd in commands:
+ if module.check_mode and not cmd['command'].startswith('show'):
+ warnings.append('only show commands are supported when using '
+ 'check mode, not executing `%s`' % cmd['command'])
+ else:
+ if cmd['command'].startswith('conf'):
+ module.fail_json(msg='ops_command does not support running '
+ 'config mode commands. Please use '
+ 'ops_config instead')
+ try:
+ runner.add_command(**cmd)
+ except AddCommandError:
+ exc = get_exception()
+ warnings.append('duplicate command detected: %s' % cmd)
- while retries > 0:
- response = module.execute(commands)
- result['stdout'] = response
+ for item in conditionals:
+ runner.add_conditional(item)
- for item in list(queue):
- if item(response):
- queue.remove(item)
+ runner.retries = module.params['retries']
+ runner.interval = module.params['interval']
+ runner.match = module.params['match']
- if not queue:
- break
+ try:
+ runner.run()
+ except FailedConditionsError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc))
+
+ result = dict(changed=False, stdout=list())
- time.sleep(interval)
- retries -= 1
- else:
- failed_conditions = [item.raw for item in queue]
- module.fail_json(msg='timeout waiting for value', failed_conditions=failed_conditions)
+ for cmd in commands:
+ try:
+ output = runner.get_command(cmd['command'])
+ except ValueError:
+ output = 'command not executed due to check_mode, see warnings'
+ result['stdout'].append(output)
+ result['warnings'] = warnings
result['stdout_lines'] = list(to_lines(result['stdout']))
- return module.exit_json(**result)
-from ansible.module_utils.basic import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.netcfg import *
-from ansible.module_utils.openswitch import *
+ module.exit_json(**result)
+
+
if __name__ == '__main__':
- main()
+ main()
diff --git a/network/openswitch/ops_config.py b/network/openswitch/ops_config.py
index 37960038b2b..79c23aafcb4 100644
--- a/network/openswitch/ops_config.py
+++ b/network/openswitch/ops_config.py
@@ -16,11 +16,15 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: ops_config
version_added: "2.1"
-author: "Peter sprygada (@privateip)"
+author: "Peter Sprygada (@privateip)"
short_description: Manage OpenSwitch configuration using CLI
description:
- OpenSwitch configurations use a simple block indent file syntax
@@ -36,7 +40,8 @@
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
- required: true
+ required: false
+ default: null
parents:
description:
- The ordered set of parents that uniquely identify the section
@@ -45,6 +50,17 @@
level or global commands.
required: false
default: null
+ src:
+ description:
+ - The I(src) argument provides a path to the configuration file
+ to load into the remote system. The path can either be a full
+ system path to the configuration file if the value starts with /
+ or relative to the root of the implemented role or playbook.
+ This argument is mutually exclusive with the I(lines) and
+ I(parents) arguments.
+ required: false
+ default: null
+ version_added: "2.2"
before:
description:
- The ordered set of commands to push on to the command stack if
@@ -57,7 +73,7 @@
after:
description:
- The ordered set of commands to append to the end of the command
- stack if a changed needs to be made. Just like with I(before) this
+ stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
@@ -68,11 +84,13 @@
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
- to position. Finally if match is set to I(exact), command lines
- must be an equal match.
+ to position. If match is set to I(exact), command lines
+ must be an equal match. Finally, if match is set to I(none), the
+ module will not attempt to compare the source configuration with
+ the running configuration on the remote device.
required: false
default: line
- choices: ['line', 'strict', 'exact']
+ choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
@@ -90,9 +108,12 @@
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
+ - Note this argument should be considered deprecated. To achieve
+ the equivalent, set the C(match=none) which is idempotent. This argument
+ will be removed in a future release.
required: false
default: false
- choices: ['true', 'false']
+ choices: ['yes', 'no']
config:
description:
- The module, by default, will connect to the remote device and
@@ -104,20 +125,46 @@
config for comparison.
required: false
default: null
+ save:
+ description:
+ - The C(save) argument instructs the module to save the running-
+ config to the startup-config at the conclusion of the module
+ running. If check mode is specified, this argument is ignored.
+ required: false
+ default: no
+ choices: ['yes', 'no']
+ version_added: "2.2"
"""
EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: netop
+ password: netop
+
- name: configure hostname over cli
ops_config:
- lines:
- - "hostname {{ inventory_hostname }}"
+ lines:
+ - "hostname {{ inventory_hostname }}"
+ provider: "{{ cli }}"
+
- name: configure vlan 10 over cli
ops_config:
- lines:
- - no shutdown
- parents:
- - vlan 10
+ lines:
+ - no shutdown
+ parents:
+ - vlan 10
+ provider: "{{ cli }}"
+
+- name: load config from file
+ ops_config:
+ src: ops01.cfg
+ backup: yes
+ provider: "{{ cli }}"
"""
RETURN = """
@@ -126,122 +173,140 @@
returned: always
type: list
sample: ['...', '...']
-
-responses:
- description: The set of responses from issuing the commands on the device
- retured: when not check_mode
- type: list
- sample: ['...', '...']
+backup_path:
+ description: The full path to the backup file
+ returned: when backup is yes
+ type: path
+ sample: /playbooks/ansible/backup/ops_config.2016-07-16@22:28:34
"""
import re
-import itertools
-
-def get_config(module):
- config = module.params['config'] or dict()
- if not config and not module.params['force']:
- config = module.config
- return config
-
-
-def build_candidate(lines, parents, config, strategy):
- candidate = list()
-
- if strategy == 'strict':
- for index, cmd in enumerate(lines):
- try:
- if cmd != config[index]:
- candidate.append(cmd)
- except IndexError:
- candidate.append(cmd)
-
- elif strategy == 'exact':
- if len(lines) != len(config):
- candidate = list(lines)
- else:
- for cmd, cfg in itertools.izip(lines, config):
- if cmd != cfg:
- candidate = list(lines)
- break
- else:
- for cmd in lines:
- if cmd not in config:
- candidate.append(cmd)
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.openswitch import NetworkModule, NetworkError
+from ansible.module_utils.netcfg import NetworkConfig, dumps
+
+def check_args(module, warnings):
+ if module.params['force']:
+ warnings.append('The force argument is deprecated, please use '
+ 'match=none instead. This argument will be '
+ 'removed in the future')
+
+def get_config(module, result):
+ contents = module.params['config']
+ if not contents:
+ contents = module.config.get_config()
+ return NetworkConfig(indent=4, contents=contents)
+def get_candidate(module):
+ candidate = NetworkConfig(indent=4)
+ if module.params['src']:
+ candidate.load(module.params['src'])
+ elif module.params['lines']:
+ parents = module.params['parents'] or list()
+ candidate.add(module.params['lines'], parents=parents)
return candidate
+def load_config(module, commands, result):
+ if not module.check_mode:
+ module.config(commands)
+ result['changed'] = True
+
+def run(module, result):
+ match = module.params['match']
+ replace = module.params['replace']
+ path = module.params['parents']
+
+ candidate = get_candidate(module)
+
+ if match != 'none':
+ config = get_config(module, result)
+ configobjs = candidate.difference(config, path=path, match=match,
+ replace=replace)
+ else:
+ configobjs = candidate.items
+
+ if configobjs:
+ commands = dumps(configobjs, 'commands').split('\n')
+
+ if module.params['lines']:
+ if module.params['before']:
+ commands[:0] = module.params['before']
+
+ if module.params['after']:
+ commands.extend(module.params['after'])
+
+ result['updates'] = commands
+
+ # send the configuration commands to the device and merge
+ # them with the current running config
+ if not module.check_mode:
+ module.config.load_config(commands)
+ result['changed'] = True
+
+ if module.params['save']:
+ if not module.check_mode:
+ module.config.save_config()
+ result['changed'] = True
def main():
argument_spec = dict(
- lines=dict(aliases=['commands'], required=True, type='list'),
+ src=dict(type='path'),
+
+ lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
+
before=dict(type='list'),
after=dict(type='list'),
- match=dict(default='line', choices=['line', 'strict', 'exact']),
- replace=dict(default='line', choices=['line', 'block']),
- force=dict(default=False, type='bool'),
- config=dict(),
- transport=dict(default='cli', choices=['cli'])
- )
- module = get_module(argument_spec=argument_spec,
- supports_check_mode=True)
+ match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
+ replace=dict(default='line', choices=['line', 'block']),
- lines = module.params['lines']
- parents = module.params['parents'] or list()
+ # this argument is deprecated in favor of setting match: none
+ # it will be removed in a future version
+ force=dict(default=False, type='bool'),
- before = module.params['before']
- after = module.params['after']
+ config=dict(),
- match = module.params['match']
- replace = module.params['replace']
+ save=dict(type='bool', default=False),
+ backup=dict(type='bool', default=False),
- contents = get_config(module)
- config = module.parse_config(contents)
+ # ops_config is only supported over Cli transport so force
+ # the value of transport to be cli
+ transport=dict(default='cli', choices=['cli'])
+ )
- if parents:
- for parent in parents:
- for item in config:
- if item.text == parent:
- config = item
+ mutually_exclusive = [('lines', 'src')]
- try:
- children = [c.text for c in config.children]
- except AttributeError:
- children = [c.text for c in config]
+ required_if = [('match', 'strict', ['lines']),
+ ('match', 'exact', ['lines']),
+ ('replace', 'block', ['lines'])]
- else:
- children = [c.text for c in config if not c.parents]
+ module = NetworkModule(argument_spec=argument_spec,
+ connect_on_load=False,
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ supports_check_mode=True)
- result = dict(changed=False)
+ if module.params['force'] is True:
+ module.params['match'] = 'none'
- candidate = build_candidate(lines, parents, children, match)
+ warnings = list()
+ check_args(module, warnings)
- if candidate:
- if replace == 'line':
- candidate[:0] = parents
- else:
- candidate = list(parents)
- candidate.extend(lines)
+ result = dict(changed=False, warnings=warnings)
- if before:
- candidate[:0] = before
+ if module.params['backup']:
+ result['__backup__'] = module.config.get_config()
- if after:
- candidate.extend(after)
+ try:
+ run(module, result)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc))
- if not module.check_mode:
- response = module.configure(candidate)
- result['responses'] = response
- result['changed'] = True
+ module.exit_json(**result)
- result['updates'] = candidate
- return module.exit_json(**result)
-from ansible.module_utils.basic import *
-from ansible.module_utils.shell import *
-from ansible.module_utils.netcfg import *
-from ansible.module_utils.openswitch import *
if __name__ == '__main__':
main()
diff --git a/network/openswitch/ops_facts.py b/network/openswitch/ops_facts.py
index df33ca8958e..cbf31bc87e7 100644
--- a/network/openswitch/ops_facts.py
+++ b/network/openswitch/ops_facts.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: ops_facts
@@ -22,10 +26,16 @@
author: "Peter Sprygada (@privateip)"
short_description: Collect device specific facts from OpenSwitch
description:
- - This module collects additional device fact information from a
- remote device running OpenSwitch using either the CLI or REST
- interfaces. It provides optional arguments for collecting fact
- information.
+ - Collects facts from devices running the OpenSwitch operating
+ system. Fact collection is supported over both Cli and Rest
+ transports. This module prepends all of the base network fact keys
+ with C(ansible_net_). The facts module will always collect a
+ base set of facts from the device and can enable or disable
+ collection of additional facts.
+ - The facts collected from pre Ansible 2.2 are still available and
+ are collected for backwards compatibility; however, these facts
+ should be considered deprecated and will be removed in a future
+ release.
extends_documentation_fragment: openswitch
options:
config:
@@ -47,129 +57,365 @@
valid when the C(transport=rest).
required: false
default: null
-notes:
- - The use of the REST transport is still experimental until it is
- fully implemented.
+ gather_subset:
+ description:
+ - When supplied, this argument will restrict the facts collected
+ to a given subset. Possible values for this argument include
+ all, hardware, config, legacy, and interfaces. Can specify a
+ list of values to include a larger subset. Values can also be used
+        with an initial C(!) to specify that a specific subset should
+ not be collected.
+ required: false
+ default: '!config'
+ version_added: "2.2"
"""
EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: netop
+ password: netop
+ transport: cli
+ rest:
+ host: "{{ inventory_hostname }}"
+ username: netop
+ password: netop
+ transport: rest
+
+- ops_facts:
+ gather_subset: all
+ provider: "{{ rest }}"
+
+# Collect only the config and default facts
+- ops_facts:
+ gather_subset: config
+ provider: "{{ cli }}"
+
+# Do not collect config facts
+- ops_facts:
+ gather_subset:
+ - "!config"
+ provider: "{{ cli }}"
+
- name: collect device facts
ops_facts:
+ provider: "{{ cli }}"
- name: include the config
ops_facts:
config: yes
+ provider: "{{ rest }}"
- name: include a set of rest endpoints
ops_facts:
endpoints:
- /system/interfaces/1
- /system/interfaces/2
+ provider: "{{ rest }}"
"""
RETURN = """
+ansible_net_gather_subset:
+ description: The list of fact subsets collected from the device
+ returned: always
+ type: list
+
+# default
+ansible_net_model:
+ description: The model name returned from the device
+ returned: when transport is cli
+ type: str
+ansible_net_serialnum:
+ description: The serial number of the remote device
+ returned: when transport is cli
+ type: str
+ansible_net_version:
+ description: The operating system version running on the remote device
+ returned: always
+ type: str
+ansible_net_hostname:
+ description: The configured hostname of the device
+ returned: always
+ type: string
+ansible_net_image:
+ description: The image file the device is running
+ returned: when transport is cli
+ type: string
+
+# config
+ansible_net_config:
+ description: The current active config from the device
+ returned: when config is enabled
+ type: str
+
+# legacy (pre Ansible 2.2)
config:
description: The current system configuration
returned: when enabled
type: string
sample: '....'
-
hostname:
description: returns the configured hostname
returned: always
type: string
sample: ops01
-
version:
description: The current version of OpenSwitch
returned: always
type: string
sample: '0.3.0'
-
endpoints:
description: The JSON response from the URL endpoint
- returned: when endpoints argument is defined
+ returned: when endpoints argument is defined and transport is rest
type: list
sample: [{....}, {....}]
"""
import re
+import ansible.module_utils.openswitch
+from ansible.module_utils.netcli import CommandRunner, AddCommandError
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.six import iteritems
-def get(module, url, expected_status=200):
- response = module.connection.get(url)
- if response.headers['status'] != expected_status:
- module.fail_json(**response.headers)
- return response
+def add_command(runner, command):
+ try:
+ runner.add_command(command)
+ except AddCommandError:
+ # AddCommandError is raised for any issue adding a command to
+ # the runner. Silently ignore the exception in this case
+ pass
-def get_config(module):
- if module.params['transport'] == 'ssh':
- rc, out, err = module.run_command('vtysh -c "show running-config"')
- return out
- elif module.params['transport'] == 'rest':
- response = get(module, '/system/full-configuration')
- return response.json
- elif module.params['transport'] == 'cli':
- response = module.connection.send('show running-config')
- return response[0]
+class FactsBase(object):
+
+ def __init__(self, module, runner):
+ self.module = module
+ self.transport = module.params['transport']
+ self.runner = runner
+ self.facts = dict()
+
+ if self.transport == 'cli':
+ self.commands()
+
+ def commands(self):
+ raise NotImplementedError
+
+ def populate(self):
+ getattr(self, self.transport)()
+
+ def cli(self):
+ pass
+
+ def rest(self):
+ pass
+
+
+class Default(FactsBase):
+
+ def commands(self):
+ add_command(self.runner, 'show system')
+ add_command(self.runner, 'show hostname')
+
+ def rest(self):
+ self.facts.update(self.get_system())
+
+ def cli(self):
+ data = self.runner.get_command('show system')
+
+ self.facts['version'] = self.parse_version(data)
+ self.facts['serialnum'] = self.parse_serialnum(data)
+ self.facts['model'] = self.parse_model(data)
+ self.facts['image'] = self.parse_image(data)
+
+ self.facts['hostname'] = self.runner.get_command('show hostname')
+
+ def parse_version(self, data):
+ match = re.search(r'OpenSwitch Version\s+: (\S+)', data)
+ if match:
+ return match.group(1)
+
+ def parse_model(self, data):
+ match = re.search(r'Platform\s+:\s(\S+)', data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_image(self, data):
+ match = re.search(r'\(Build: (\S+)\)', data, re.M)
+ if match:
+ return match.group(1)
-def get_facts(module):
- if module.params['transport'] == 'rest':
- response = get(module, '/system')
+ def parse_serialnum(self, data):
+ match = re.search(r'Serial Number\s+: (\S+)', data)
+ if match:
+ return match.group(1)
+
+ def get_system(self):
+ response = self.module.connection.get('/system')
return dict(
hostname=response.json['configuration']['hostname'],
version=response.json['status']['switch_version']
)
- elif module.params['transport'] == 'cli':
- response = module.connection.send(['show system', 'show hostname'])
- facts = dict()
- facts['hostname'] = response[1]
- match = re.search(r'OpenSwitch Version\s*:\s*(.*)$', response[0], re.M)
+
+
+class Config(FactsBase):
+
+ def commands(self):
+ add_command(self.runner, 'show running-config')
+
+ def cli(self):
+ self.facts['config'] = self.runner.get_command('show running-config')
+
+class Legacy(FactsBase):
+ # facts from ops_facts 2.1
+
+ def commands(self):
+ add_command(self.runner, 'show system')
+ add_command(self.runner, 'show hostname')
+
+ if self.module.params['config']:
+ add_command(self.runner, 'show running-config')
+
+ def rest(self):
+ self.facts['_endpoints'] = self.get_endpoints()
+ self.facts.update(self.get_system())
+
+ if self.module.params['config']:
+ self.facts['_config'] = self.get_config()
+
+ def cli(self):
+ self.facts['_hostname'] = self.runner.get_command('show hostname')
+
+ data = self.runner.get_command('show system')
+ self.facts['_version'] = self.parse_version(data)
+
+ if self.module.params['config']:
+ self.facts['_config'] = self.runner.get_command('show running-config')
+
+ def parse_version(self, data):
+ match = re.search(r'OpenSwitch Version\s+: (\S+)', data)
if match:
- facts['version'] = match.group(1)
- return facts
- return dict()
+ return match.group(1)
+ def get_endpoints(self):
+ responses = list()
+ urls = self.module.params['endpoints'] or list()
+ for ep in urls:
+ response = self.module.connection.get(ep)
+ if response.headers['status'] != 200:
+ self.module.fail_json(msg=response.headers['msg'])
+ responses.append(response.json)
+ return responses
-def main():
- """ main entry point for module execution
- """
+ def get_system(self):
+ response = self.module.connection.get('/system')
+ return dict(
+ _hostname=response.json['configuration']['hostname'],
+ _version=response.json['status']['switch_version']
+ )
+
+ def get_config(self):
+ response = self.module.connection.get('/system/full-configuration')
+ return response.json
+
+def check_args(module, warnings):
+ if module.params['transport'] != 'rest' and module.params['endpoints']:
+ warnings.append('Endpoints can only be collected when transport is '
+ 'set to "rest". Endpoints will not be collected')
+
+
+FACT_SUBSETS = dict(
+ default=Default,
+ config=Config,
+ legacy=Legacy
+)
+
+VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
+def main():
spec = dict(
- endpoints=dict(type='list'),
+ gather_subset=dict(default=['!config'], type='list'),
+
+ # the next two arguments are legacy from pre 2.2 ops_facts
+ # these will be deprecated and ultimately removed
config=dict(default=False, type='bool'),
+ endpoints=dict(type='list'),
+
transport=dict(default='cli', choices=['cli', 'rest'])
)
- module = get_module(argument_spec=spec,
- supports_check_mode=True)
+ module = NetworkModule(argument_spec=spec, supports_check_mode=True)
+
+ gather_subset = module.params['gather_subset']
+
+ warnings = list()
+ check_args(module, warnings)
+
+ runable_subsets = set()
+ exclude_subsets = set()
+
+ for subset in gather_subset:
+ if subset == 'all':
+ runable_subsets.update(VALID_SUBSETS)
+ continue
+
+ if subset.startswith('!'):
+ subset = subset[1:]
+ if subset == 'all':
+ exclude_subsets.update(VALID_SUBSETS)
+ continue
+ exclude = True
+ else:
+ exclude = False
+
+ if subset not in VALID_SUBSETS:
+ module.fail_json(msg='Bad subset')
+
+ if exclude:
+ exclude_subsets.add(subset)
+ else:
+ runable_subsets.add(subset)
+
+ if not runable_subsets:
+ runable_subsets.update(VALID_SUBSETS)
+
+ runable_subsets.difference_update(exclude_subsets)
+ runable_subsets.add('default')
+ runable_subsets.add('legacy')
- endpoints = module.params['endpoints'] or list()
- if endpoints and not module.params['transport'] == 'rest':
- module.fail_json(msg="endpoints argument can only be used "
- "with transport `rest`")
+ facts = dict()
+ facts['gather_subset'] = list(runable_subsets)
- result = dict(changed=False)
+ runner = CommandRunner(module)
- facts = get_facts(module)
+ instances = list()
+ for key in runable_subsets:
+ instances.append(FACT_SUBSETS[key](module, runner))
- if module.params['config']:
- facts['config'] = get_config(module)
+ if module.params['transport'] == 'cli':
+ runner.run()
- responses = list()
- for ep in endpoints:
- response = get(module, ep)
- responses.append(response.json)
+ try:
+ for inst in instances:
+ inst.populate()
+ facts.update(inst.facts)
+ except Exception:
+ module.exit_json(out=module.from_json(runner.items))
- if responses:
- facts['endpoints'] = responses
+ ansible_facts = dict()
+ for key, value in iteritems(facts):
+ # this is to maintain compatibility with ops_facts 2.1
+ if key.startswith('_'):
+ ansible_facts[key[1:]] = value
+ else:
+ key = 'ansible_net_%s' % key
+ ansible_facts[key] = value
- result['ansible_facts'] = facts
- module.exit_json(**result)
+ module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
-from ansible.module_utils.basic import *
-from ansible.module_utils.openswitch import *
if __name__ == '__main__':
main()
diff --git a/network/sros/sros_command.py b/network/sros/sros_command.py
index 97342cf54ee..3c7dd21e203 100644
--- a/network/sros/sros_command.py
+++ b/network/sros/sros_command.py
@@ -16,24 +16,28 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: sros_command
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
-short_description: Run commands on remote devices running Nokia SROS
+short_description: Run commands on remote devices running Nokia SR OS
description:
- - Sends arbitrary commands to an SROS node and returns the results
+ - Sends arbitrary commands to an SR OS node and returns the results
read from the device. This module includes an argument that will
cause the module to wait for a specific condition before returning
or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
- Please use M(sros_config) to configure SROS devices.
+ Please use M(sros_config) to configure SR OS devices.
extends_documentation_fragment: sros
options:
commands:
description:
- - List of commands to send to the remote SROS device over the
+ - List of commands to send to the remote SR OS device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
@@ -92,20 +96,20 @@
- name: run show version on remote devices
sros_command:
commands: show version
- provider "{{ cli }}"
+ provider: "{{ cli }}"
- name: run show version and check to see if output contains sros
sros_command:
commands: show version
wait_for: result[0] contains sros
- provider "{{ cli }}"
+ provider: "{{ cli }}"
- name: run multiple commands on remote nodes
sros_command:
commands:
- show version
- show port detail
- provider "{{ cli }}"
+ provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
sros_command:
diff --git a/network/sros/sros_config.py b/network/sros/sros_config.py
index e5b95a7705d..2efca84dc12 100644
--- a/network/sros/sros_config.py
+++ b/network/sros/sros_config.py
@@ -16,16 +16,20 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: sros_config
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
-short_description: Manage Nokia SROS device configuration
+short_description: Manage Nokia SR OS device configuration
description:
- - Nokia SROS configurations use a simple block indent file syntax
+ - Nokia SR OS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
- an implementation for working with SROS configuration sections in
+ an implementation for working with SR OS configuration sections in
a deterministic way.
extends_documentation_fragment: sros
options:
@@ -63,7 +67,7 @@
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
- against the system
+ against the system.
required: false
default: null
after:
@@ -94,7 +98,7 @@
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
- line is not correct
+ line is not correct.
required: false
default: line
choices: ['line', 'block']
@@ -105,36 +109,12 @@
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
- the equivalient, set the match argument to none. This argument
+ the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: [ "true", "false" ]
version_added: "2.2"
- update:
- description:
- - The I(update) argument controls how the configuration statements
- are processed on the remote device. Valid choices for the I(update)
- argument are I(merge) and I(check). When the argument is set to
- I(merge), the configuration changes are merged with the current
- device running configuration. When the argument is set to I(check)
- the configuration updates are determined but not actually configured
- on the remote device.
- required: false
- default: merge
- choices: ['merge', 'check']
- version_added: "2.2"
- commit:
- description:
- - This argument specifies the update method to use when applying the
- configuration changes to the remote node. If the value is set to
- I(merge) the configuration updates are merged with the running-
- config. If the value is set to I(check), no changes are made to
- the remote host.
- required: false
- default: merge
- choices: ['merge', 'check']
- version_added: "2.2"
backup:
description:
- This argument will cause the module to create a full backup of
@@ -155,7 +135,7 @@
required: false
default: null
version_added: "2.2"
- default:
+ defaults:
description:
- This argument specifies whether or not to collect all defaults
when getting the remote device running config. When enabled,
@@ -164,6 +144,7 @@
required: false
default: no
choices: ['yes', 'no']
+ aliases: ['detail']
version_added: "2.2"
save:
description:
@@ -174,15 +155,6 @@
default: no
choices: ['yes', 'no']
version_added: "2.2"
- state:
- description:
- - This argument specifies whether or not the running-config is
- present on the remote device. When set to I(absent) the
- running-config on the remote device is erased.
- required: false
- default: no
- choices: ['yes', 'no']
- version_added: "2.2"
"""
EXAMPLES = """
@@ -218,7 +190,7 @@
- name: load config from file
sros_config:
- src: {{ inventory_hostname }}.cfg
+ src: "{{ inventory_hostname }}.cfg"
provider: "{{ cli }}"
save: yes
"""
@@ -235,22 +207,9 @@
type: path
sample: /playbooks/ansible/backup/sros_config.2016-07-16@22:28:34
"""
-import re
-
from ansible.module_utils.basic import get_exception
from ansible.module_utils.sros import NetworkModule, NetworkError
from ansible.module_utils.netcfg import NetworkConfig, dumps
-from ansible.module_utils.netcli import Command
-
-def invoke(name, *args, **kwargs):
- func = globals().get(name)
- if func:
- return func(*args, **kwargs)
-
-def check_args(module, warnings):
- if module.params['parents']:
- if not module.params['lines'] or module.params['src']:
- warnings.append('ignoring unnecessary argument parents')
def sanitize_config(lines):
commands = list()
@@ -263,10 +222,10 @@ def sanitize_config(lines):
return commands
def get_config(module, result):
- contents = module.params['config'] or result.get('__config__')
+ contents = module.params['config']
if not contents:
- contents = module.config.get_config()
- result['__config__'] = contents
+ defaults = module.params['defaults']
+ contents = module.config.get_config(detail=defaults)
return NetworkConfig(device_os='sros', contents=contents)
def get_candidate(module):
@@ -278,12 +237,7 @@ def get_candidate(module):
candidate.add(module.params['lines'], parents=parents)
return candidate
-def revert_config(module):
- if result.get('__checkpoint__'):
- module.cli(['admin rollback revert latest-rb',
- 'admin rollback delete latest-rb'])
-
-def present(module, result):
+def run(module, result):
match = module.params['match']
candidate = get_candidate(module)
@@ -292,7 +246,6 @@ def present(module, result):
config = get_config(module, result)
configobjs = candidate.difference(config)
else:
- config = None
configobjs = candidate.items
if configobjs:
@@ -301,58 +254,40 @@ def present(module, result):
result['updates'] = commands
- if module.params['update'] != 'check':
- # check if creating checkpoints is possible
- config = module.config.get_config()
- if 'rollback-location' not in config:
- warn = 'Cannot create checkpoint. Please enable this feature ' \
- 'with "configure system rollback rollback-location" ' \
- 'command. Automatic rollback will be disabled'
- result['warnings'].append(warn)
- result['__checkpoint__'] = False
- else:
- result['__checkpoint__'] = True
-
- # create a config checkpoint prior to trying to
- # configure the device
- if result.get('__checkpoint__'):
- module.cli(['admin rollback save'])
-
- # send the configuration commands to the device and merge
- # them with the current running config
- if not module.check_mode:
- module.config(commands)
- result['changed'] = True
-
- # remove checkpoint from system
- if result.get('__checkpoint__'):
- module.cli(['admin rollback delete latest-rb'])
-
- if module.params['save'] and not module.check_mode:
- module.config.save_config()
-
-def absent(module, result):
- if not module.check_mode:
- module.cli('write erase')
- result['changed'] = True
+ # check if creating checkpoints is possible
+ if not module.connection.rollback_enabled:
+ warn = 'Cannot create checkpoint. Please enable this feature ' \
+ 'using the sros_rollback module. Automatic rollback ' \
+ 'will be disabled'
+ result['warnings'].append(warn)
-def main():
+ # send the configuration commands to the device and merge
+ # them with the current running config
+ if not module.check_mode:
+ module.config.load_config(commands)
+ result['changed'] = True
+ if module.params['save']:
+ if not module.check_mode:
+ module.config.save_config()
+ result['changed'] = True
+
+def main():
+ """ main entry point for module execution
+ """
argument_spec = dict(
+ src=dict(type='path'),
+
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
- src=dict(type='path'),
-
match=dict(default='line', choices=['line', 'none']),
- update=dict(choices=['merge', 'check'], default='merge'),
- backup=dict(type='bool', default=False),
config=dict(),
- default=dict(type='bool', default=False),
- save=dict(type='bool', default=False),
+ defaults=dict(type='bool', default=False, aliases=['detail']),
- state=dict(choices=['present', 'absent'], default='present')
+ backup=dict(type='bool', default=False),
+ save=dict(type='bool', default=False),
)
mutually_exclusive = [('lines', 'src')]
@@ -362,22 +297,16 @@ def main():
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
- state = module.params['state']
-
- warnings = list()
- check_args(module, warnings)
-
- result = dict(changed=False, warnings=warnings)
+ result = dict(changed=False, warnings=list())
if module.params['backup']:
result['__backup__'] = module.config.get_config()
try:
- invoke(state, module, result)
+ run(module, result)
except NetworkError:
- revert_config(module)
exc = get_exception()
- module.fail_json(msg=str(exc))
+ module.fail_json(msg=str(exc), **exc.kwargs)
module.exit_json(**result)
diff --git a/network/sros/sros_rollback.py b/network/sros/sros_rollback.py
new file mode 100644
index 00000000000..85d7bdc5067
--- /dev/null
+++ b/network/sros/sros_rollback.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: sros_rollback
+version_added: "2.2"
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Nokia SR OS rollback
+description:
+ - Configure the rollback feature on remote Nokia devices running
+ the SR OS operating system. This module provides a stateful
+ implementation for managing the configuration of the rollback
+ feature.
+extends_documentation_fragment: sros
+options:
+ rollback_location:
+ description:
+ - The I(rollback_location) specifies the location and filename
+ of the rollback checkpoint files. This argument supports any
+ valid local or remote URL as specified in SR OS
+ required: false
+ default: null
+ remote_max_checkpoints:
+ description:
+ - The I(remote_max_checkpoints) argument configures the maximum
+ number of rollback files that can be transferred and saved to
+ a remote location. Valid values for this argument are in the
+ range of 1 to 50
+ required: false
+ default: null
+ local_max_checkpoints:
+ description:
+ - The I(local_max_checkpoints) argument configures the maximum
+ number of rollback files that can be saved on the devices local
+ compact flash. Valid values for this argument are in the range
+ of 1 to 50
+ required: false
+ default: null
+ rescue_location:
+ description:
+ - The I(rescue_location) specifies the location of the
+ rescue file. This argument supports any valid local
+ or remote URL as specified in SR OS
+ required: false
+ default: null
+ state:
+ description:
+ - The I(state) argument specifies the state of the configuration
+ entries in the devices active configuration. When the state
+ value is set to C(true) the configuration is present in the
+ devices active configuration. When the state value is set to
+ C(false) the configuration values are removed from the devices
+ active configuration.
+ required: false
+ default: present
+ choices: ['present', 'absent']
+"""
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+ cli:
+ host: "{{ inventory_hostname }}"
+ username: admin
+ password: admin
+ transport: cli
+
+- name: configure rollback location
+ sros_rollback:
+ rollback_location: "cb3:/ansible"
+ provider: "{{ cli }}"
+
+- name: remove all rollback configuration
+ sros_rollback:
+ state: absent
+ provider: "{{ cli }}"
+"""
+
+RETURN = """
+updates:
+ description: The set of commands that will be pushed to the remote device
+ returned: always
+ type: list
+ sample: ['...', '...']
+"""
+from ansible.module_utils.basic import get_exception
+from ansible.module_utils.sros import NetworkModule, NetworkError
+from ansible.module_utils.netcfg import NetworkConfig, dumps
+
+def invoke(name, *args, **kwargs):
+ func = globals().get(name)
+ if func:
+ return func(*args, **kwargs)
+
+def sanitize_config(lines):
+ commands = list()
+ for line in lines:
+ for index, entry in enumerate(commands):
+ if line.startswith(entry):
+ del commands[index]
+ break
+ commands.append(line)
+ return commands
+
+def present(module, commands):
+ setters = set()
+ for key, value in module.argument_spec.iteritems():
+ if module.params[key] is not None:
+ setter = value.get('setter') or 'set_%s' % key
+ if setter not in setters:
+ setters.add(setter)
+ invoke(setter, module, commands)
+
+def absent(module, commands):
+ config = module.config.get_config()
+ if 'rollback-location' in config:
+ commands.append('configure system rollback no rollback-location')
+ if 'rescue-location' in config:
+ commands.append('configure system rollback no rescue-location')
+ if 'remote-max-checkpoints' in config:
+ commands.append('configure system rollback no remote-max-checkpoints')
+ if 'local-max-checkpoints' in config:
+ commands.append('configure system rollback no local-max-checkpoints')
+
+def set_rollback_location(module, commands):
+ value = module.params['rollback_location']
+ commands.append('configure system rollback rollback-location "%s"' % value)
+
+def set_local_max_checkpoints(module, commands):
+ value = module.params['local_max_checkpoints']
+ if not 1 <= value <= 50:
+ module.fail_json(msg='local_max_checkpoints must be between 1 and 50')
+ commands.append('configure system rollback local-max-checkpoints %s' % value)
+
+def set_remote_max_checkpoints(module, commands):
+ value = module.params['remote_max_checkpoints']
+ if not 1 <= value <= 50:
+ module.fail_json(msg='remote_max_checkpoints must be between 1 and 50')
+ commands.append('configure system rollback remote-max-checkpoints %s' % value)
+
+def set_rescue_location(module, commands):
+ value = module.params['rescue_location']
+ commands.append('configure system rollback rescue-location "%s"' % value)
+
+def get_config(module):
+ contents = module.config.get_config()
+ return NetworkConfig(device_os='sros', contents=contents)
+
+def load_config(module, commands, result):
+ candidate = NetworkConfig(device_os='sros', contents='\n'.join(commands))
+ config = get_config(module)
+ configobjs = candidate.difference(config)
+
+ if configobjs:
+ commands = dumps(configobjs, 'lines')
+ commands = sanitize_config(commands.split('\n'))
+
+ result['updates'] = commands
+
+ # send the configuration commands to the device and merge
+ # them with the current running config
+ if not module.check_mode:
+ module.config(commands)
+
+ result['changed'] = True
+
+def main():
+ """ main entry point for module execution
+ """
+ argument_spec = dict(
+ rollback_location=dict(),
+
+ local_max_checkpoints=dict(type='int'),
+ remote_max_checkpoints=dict(type='int'),
+
+ rescue_location=dict(),
+
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ module = NetworkModule(argument_spec=argument_spec,
+ connect_on_load=False,
+ supports_check_mode=True)
+
+ state = module.params['state']
+
+ result = dict(changed=False)
+
+ commands = list()
+ invoke(state, module, commands)
+
+ try:
+ load_config(module, commands, result)
+ except NetworkError:
+ exc = get_exception()
+ module.fail_json(msg=str(exc), **exc.kwargs)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/vyos/vyos_command.py b/network/vyos/vyos_command.py
index 49e41b6127c..14180e305d7 100644
--- a/network/vyos/vyos_command.py
+++ b/network/vyos/vyos_command.py
@@ -16,6 +16,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: vyos_command
@@ -28,6 +32,10 @@
to validate key parameters before returning successfully. If the
conditional statements are not met in the wait period, the task
fails.
+ - Certain C(show) commands in VyOS produce many lines of output and
+ use a custom pager that can cause this module to hang. If the
+ value of the environment variable C(ANSIBLE_VYOS_TERMINAL_LENGTH)
+ is not set, the default number of 10000 is used.
extends_documentation_fragment: vyos
options:
commands:
@@ -43,15 +51,15 @@
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
- before moving forward. If the conditional is not true
- by the configured I(retries), the task fails. See examples.
+ before moving forward. If the conditional is not true
+ by the configured I(retries), the task fails. See examples.
required: false
default: null
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
- I(wait_for) argument to specify the match policy. Valid
+ I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
@@ -75,6 +83,10 @@
trying the command again.
required: false
default: 1
+
+notes:
+ - Running C(show system boot-messages all) will cause the module to hang since
+ VyOS is using a custom pager setting to display the output of that command.
"""
EXAMPLES = """
@@ -117,7 +129,7 @@
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The conditionals that failed
- retured: failed
+ returned: failed
type: list
sample: ['...', '...']
warnings:
@@ -126,22 +138,24 @@
type: list
sample: ['...', '...']
"""
+import ansible.module_utils.vyos
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcli import CommandRunner
from ansible.module_utils.netcli import AddCommandError, FailedConditionsError
-from ansible.module_utils.vyos import NetworkModule, NetworkError
+from ansible.module_utils.network import NetworkModule, NetworkError
+from ansible.module_utils.six import string_types
VALID_KEYS = ['command', 'output', 'prompt', 'response']
def to_lines(stdout):
for item in stdout:
- if isinstance(item, basestring):
+ if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module):
for cmd in module.params['commands']:
- if isinstance(cmd, basestring):
+ if isinstance(cmd, string_types):
cmd = dict(command=cmd, output=None)
elif 'command' not in cmd:
module.fail_json(msg='command keyword argument is required')
diff --git a/network/vyos/vyos_config.py b/network/vyos/vyos_config.py
index 39d9be7e4e5..8cf2c3d6047 100644
--- a/network/vyos/vyos_config.py
+++ b/network/vyos/vyos_config.py
@@ -16,6 +16,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: vyos_config
@@ -56,17 +60,6 @@
required: false
default: line
choices: ['line', 'none']
- update:
- description:
- - The C(update) argument controls the method used to update the
- remote device configuration. This argument accepts two valid
- options, C(merge) or C(check). When C(merge) is specified, the
- configuration is merged into the current active config. When
- C(check) is specified, the module returns the set of updates
- that would be applied to the active configuration.
- required: false
- default: merge
- choices: ['merge', 'check']
backup:
description:
- The C(backup) argument will backup the current devices active
@@ -100,15 +93,6 @@
required: false
default: false
choices: ['yes', 'no']
- state:
- description:
- - The C(state) argument controls the existing state of the config
- file on disk. When set to C(present), the configuration should
- exist on disk and when set to C(absent) the configuration file
- is removed. This only applies to the startup configuration.
- required: false
- default: present
- choices: ['present', 'absent']
"""
RETURN = """
@@ -117,7 +101,7 @@
returned: always
type: list
sample: ['...', '...']
-removed:
+filtered:
description: The list of configuration commands removed to avoid a load failure
returned: always
type: list
@@ -162,16 +146,6 @@
]
-def invoke(name, *args, **kwargs):
- func = globals().get(name)
- if func:
- return func(*args, **kwargs)
-
-def check_args(module, warnings):
- if module.params['save'] and module.params['update'] == 'check':
- warnings.append('The configuration will not be saved when update '
- 'is set to check')
-
def config_to_commands(config):
set_format = config.startswith('set') or config.startswith('delete')
candidate = NetworkConfig(indent=4, contents=config, device_os='junos')
@@ -195,7 +169,6 @@ def get_config(module, result):
contents = module.params['config']
if not contents:
contents = module.config.get_config(output='set').split('\n')
-
else:
contents = config_to_commands(contents)
@@ -237,11 +210,11 @@ def diff_config(commands, config):
return list(updates)
def sanitize_config(config, result):
- result['removed'] = list()
+ result['filtered'] = list()
for regex in CONFIG_FILTERS:
for index, line in enumerate(list(config)):
if regex.search(line):
- result['removed'].append(line)
+ result['filtered'].append(line)
del config[index]
def load_config(module, commands, result):
@@ -262,7 +235,7 @@ def load_config(module, commands, result):
result['changed'] = True
-def present(module, result):
+def run(module, result):
# get the current active config from the node or passed in via
# the config param
config = get_config(module, result)
@@ -275,37 +248,27 @@ def present(module, result):
result['updates'] = updates
- if module.params['update'] != 'check':
- load_config(module, updates, result)
-
- if result.get('removed'):
- result['warnings'].append('Some configuration commands where '
- 'removed, please see the removed key')
-
+ load_config(module, updates, result)
-def absent(module, result):
- if not module.check_mode:
- module.cli('rm /config/config.boot')
- result['changed'] = True
+ if result.get('filtered'):
+ result['warnings'].append('Some configuration commands were '
+ 'removed, please see the filtered key')
def main():
argument_spec = dict(
- lines=dict(type='list'),
src=dict(type='path'),
-
+ lines=dict(type='list'),
match=dict(default='line', choices=['line', 'none']),
- update=dict(default='merge', choices=['merge', 'check']),
- backup=dict(default=False, type='bool'),
comment=dict(default=DEFAULT_COMMENT),
config=dict(),
- save=dict(default=False, type='bool'),
- state=dict(choices=['present', 'absent'], default='present')
+ backup=dict(default=False, type='bool'),
+ save=dict(default=False, type='bool'),
)
mutually_exclusive = [('lines', 'src')]
@@ -315,18 +278,13 @@ def main():
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
- state = module.params['state']
-
- warnings = list()
- check_args(module, warnings)
-
- result = dict(changed=False, warnings=warnings)
+ result = dict(changed=False)
if module.params['backup']:
result['__backup__'] = module.config.get_config()
try:
- invoke(state, module, result)
+ run(module, result)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc), **exc.kwargs)
diff --git a/network/vyos/vyos_facts.py b/network/vyos/vyos_facts.py
index 3c09f36505a..ff081f8abc5 100644
--- a/network/vyos/vyos_facts.py
+++ b/network/vyos/vyos_facts.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: vyos_facts
@@ -35,7 +39,7 @@
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
- with an initial M(!) to specify that a specific subset should
+ with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: "!config"
@@ -57,7 +61,7 @@
- name: collect only the config and default facts
vyos_facts:
- gather_subset:config
+ gather_subset: config
- name: collect everything exception the config
vyos_facts:
@@ -70,7 +74,7 @@
returned: when config is configured
type: str
ansible_net_commits:
- descrption: The set of available configuration revisions
+ description: The set of available configuration revisions
returned: when present
type: list
ansible_net_hostname:
@@ -100,8 +104,11 @@
"""
import re
-from ansible.module_utils.netcmd import CommandRunner
-from ansible.module_utils.vyos import NetworkModule
+import ansible.module_utils.vyos
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.netcli import CommandRunner
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.six import iteritems
class FactsBase(object):
@@ -112,6 +119,9 @@ def __init__(self, runner):
self.commands()
+ def commands(self):
+ raise NotImplementedError
+
class Default(FactsBase):
@@ -160,7 +170,7 @@ def populate(self):
entry = None
for line in commits.split('\n'):
- match = re.match('(\d+)\s+(.+)by(.+)via(.+)', line)
+ match = re.match(r'(\d+)\s+(.+)by(.+)via(.+)', line)
if match:
if entry:
entries.append(entry)
@@ -288,7 +298,7 @@ def main():
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](runner))
- runner.run_commands()
+ runner.run()
try:
for inst in instances:
@@ -299,7 +309,7 @@ def main():
module.fail_json(msg='unknown failure', output=runner.items, exc=str(exc))
ansible_facts = dict()
- for key, value in facts.iteritems():
+ for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
diff --git a/packaging/language/easy_install.py b/packaging/language/easy_install.py
index 017f6b818a6..40b1026a76a 100644
--- a/packaging/language/easy_install.py
+++ b/packaging/language/easy_install.py
@@ -22,6 +22,10 @@
import tempfile
import os.path
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: easy_install
@@ -90,10 +94,14 @@
EXAMPLES = '''
# Examples from Ansible Playbooks
-- easy_install: name=pip state=latest
+- easy_install:
+ name: pip
+ state: latest
# Install Bottle into the specified virtualenv.
-- easy_install: name=bottle virtualenv=/webapps/myapp/venv
+- easy_install:
+ name: bottle
+ virtualenv: /webapps/myapp/venv
'''
def _is_package_installed(module, name, easy_install, executable_arguments):
@@ -200,4 +208,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/packaging/language/gem.py b/packaging/language/gem.py
index acd088dc0c5..41a0961b2af 100644
--- a/packaging/language/gem.py
+++ b/packaging/language/gem.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: gem
@@ -97,13 +101,21 @@
EXAMPLES = '''
# Installs version 1.0 of vagrant.
-- gem: name=vagrant version=1.0 state=present
+- gem:
+ name: vagrant
+ version: 1.0
+ state: present
# Installs latest available version of rake.
-- gem: name=rake state=latest
+- gem:
+ name: rake
+ state: latest
# Installs rake version 1.0 from a local gem on disk.
-- gem: name=rake gem_source=/path/to/gems/rake-1.0.gem state=present
+- gem:
+ name: rake
+ gem_source: /path/to/gems/rake-1.0.gem
+ state: present
'''
import re
@@ -265,4 +277,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/packaging/language/pip.py b/packaging/language/pip.py
index 466c7ea8875..ddf6e4af013 100755
--- a/packaging/language/pip.py
+++ b/packaging/language/pip.py
@@ -19,10 +19,9 @@
# along with Ansible. If not, see .
#
-import tempfile
-import re
-import os
-import sys
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -46,8 +45,8 @@
default: null
requirements:
description:
- - The path to a pip requirements file, which should be local to the remote system.
- File can be specified as a relative path if using the chdir option.
+ - The path to a pip requirements file, which should be local to the remote system.
+ File can be specified as a relative path if using the chdir option.
required: false
default: null
virtualenv:
@@ -83,8 +82,8 @@
version_added: "2.0"
description:
- The Python executable used for creating the virtual environment.
- For example C(python3.4), C(python2.7). When not specified, the
- system Python version is used.
+ For example C(python3.5), C(python2.7). When not specified, the
+ Python version used to run the ansible module is used.
required: false
default: null
state:
@@ -119,6 +118,8 @@
example C(pip-3.3), if there are both Python 2.7 and 3.3 installations
in the system and you want to run pip for the Python 3.3 installation.
It cannot be specified together with the 'virtualenv' parameter (added in 2.1).
+ By default, it will take the appropriate version for the python interpreter
+ used by ansible, e.g. pip3 on python 3, and pip2 or pip on python 2.
version_added: "1.3"
required: false
default: null
@@ -137,55 +138,100 @@
- Please note that virtualenv (U(http://www.virtualenv.org/)) must be
installed on the remote host if the virtualenv parameter is specified and
the virtualenv needs to be created.
+ - By default, this module will use the appropriate version of pip for the
+ interpreter used by ansible (e.g. pip3 when using python 3, pip2 otherwise)
requirements: [ "virtualenv", "pip" ]
author: "Matt Wright (@mattupstate)"
'''
EXAMPLES = '''
# Install (Bottle) python package.
-- pip: name=bottle
+- pip:
+ name: bottle
# Install (Bottle) python package on version 0.11.
-- pip: name=bottle version=0.11
+- pip:
+ name: bottle
+ version: 0.11
# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args.
-- pip: name='svn+http://myrepo/svn/MyApp#egg=MyApp'
+- pip:
+ name: svn+http://myrepo/svn/MyApp#egg=MyApp
# Install MyApp using one of the remote protocols (bzr+,hg+,git+) in a non editable way.
-- pip: name='git+http://myrepo/app/MyApp' editable=false
+- pip:
+ name: git+http://myrepo/app/MyApp
+ editable: false
# Install (MyApp) from local tarball
-- pip: name='file:///path/to/MyApp.tar.gz'
+- pip:
+ name: file:///path/to/MyApp.tar.gz
# Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules
-- pip: name=bottle virtualenv=/my_app/venv
+- pip:
+ name: bottle
+ virtualenv: /my_app/venv
# Install (Bottle) into the specified (virtualenv), inheriting globally installed modules
-- pip: name=bottle virtualenv=/my_app/venv virtualenv_site_packages=yes
+- pip:
+ name: bottle
+ virtualenv: /my_app/venv
+ virtualenv_site_packages: yes
# Install (Bottle) into the specified (virtualenv), using Python 2.7
-- pip: name=bottle virtualenv=/my_app/venv virtualenv_command=virtualenv-2.7
+- pip:
+ name: bottle
+ virtualenv: /my_app/venv
+ virtualenv_command: virtualenv-2.7
# Install specified python requirements.
-- pip: requirements=/my_app/requirements.txt
+- pip:
+ requirements: /my_app/requirements.txt
# Install specified python requirements in indicated (virtualenv).
-- pip: requirements=/my_app/requirements.txt virtualenv=/my_app/venv
+- pip:
+ requirements: /my_app/requirements.txt
+ virtualenv: /my_app/venv
# Install specified python requirements and custom Index URL.
-- pip: requirements=/my_app/requirements.txt extra_args='-i https://example.com/pypi/simple'
+- pip:
+ requirements: /my_app/requirements.txt
+ extra_args: -i https://example.com/pypi/simple
# Install (Bottle) for Python 3.3 specifically,using the 'pip-3.3' executable.
-- pip: name=bottle executable=pip-3.3
+- pip:
+ name: bottle
+ executable: pip-3.3
# Install (Bottle), forcing reinstallation if it's already installed
-- pip: name=bottle state=forcereinstall
+- pip:
+ name: bottle
+ state: forcereinstall
# Install (Bottle) while ensuring the umask is 0022 (to ensure other users can use it)
-- pip: name=bottle umask=0022
+- pip:
+ name: bottle
+ umask: 0022
become: True
'''
+import tempfile
+import re
+import os
+import sys
+
+from ansible.module_utils.basic import AnsibleModule, is_executable
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import PY3
+
+#: Python one-liners to be run at the command line that will determine the
+# installed version for these special libraries. These are libraries that
+# don't end up in the output of pip freeze.
+_SPECIAL_PACKAGE_CHECKERS = {'setuptools': 'import setuptools; print(setuptools.__version__)',
+ 'pip': 'import pkg_resources; print(pkg_resources.get_distribution("pip").version)'}
+
+
def _get_cmd_options(module, cmd):
thiscmd = cmd + " --help"
rc, stdout, stderr = module.run_command(thiscmd)
@@ -193,7 +239,7 @@ def _get_cmd_options(module, cmd):
module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr))
words = stdout.strip().split()
- cmd_options = [ x for x in words if x.startswith('--') ]
+ cmd_options = [x for x in words if x.startswith('--')]
return cmd_options
@@ -204,27 +250,59 @@ def _get_full_name(name, version=None):
resp = name + '==' + version
return resp
-def _is_present(name, version, installed_pkgs):
+
+def _get_packages(module, pip, chdir):
+ '''Return results of pip command to get packages.'''
+ # Try 'pip list' command first.
+ command = '%s list' % pip
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env)
+
+ # If there was an error (pip version too old) then use 'pip freeze'.
+ if rc != 0:
+ command = '%s freeze' % pip
+ rc, out, err = module.run_command(command, cwd=chdir)
+ if rc != 0:
+ _fail(module, command, out, err)
+
+ return (command, out, err)
+
+
+def _is_present(name, version, installed_pkgs, pkg_command):
+ '''Return whether or not package is installed.'''
for pkg in installed_pkgs:
- if '==' not in pkg:
+ # Package listing will be different depending on which pip
+ # command was used ('pip list' vs. 'pip freeze').
+ if 'list' in pkg_command:
+ pkg = pkg.replace('(', '').replace(')', '')
+ if ',' in pkg:
+ pkg_name, pkg_version, _ = pkg.replace(',', '').split(' ')
+ else:
+ pkg_name, pkg_version = pkg.split(' ')
+ elif 'freeze' in pkg_command:
+ if '==' in pkg:
+ pkg_name, pkg_version = pkg.split('==')
+ else:
+ continue
+ else:
continue
- [pkg_name, pkg_version] = pkg.split('==')
-
if pkg_name == name and (version is None or version == pkg_version):
return True
return False
-
def _get_pip(module, env=None, executable=None):
- # On Debian and Ubuntu, pip is pip.
- # On Fedora18 and up, pip is python-pip.
- # On Fedora17 and below, CentOS and RedHat 6 and 5, pip is pip-python.
- # On Fedora, CentOS, and RedHat, the exception is in the virtualenv.
- # There, pip is just pip.
- candidate_pip_basenames = ['pip', 'python-pip', 'pip-python']
+ # Older pip only installed under the "/usr/bin/pip" name. Many Linux
+ # distros install it there.
+ # By default, we try to use pip required for the current python
+ # interpreter, so people can use pip to install modules dependencies
+ candidate_pip_basenames = ('pip2', 'pip')
+ if PY3:
+ # pip under python3 installs the "/usr/bin/pip3" name
+ candidate_pip_basenames = ('pip3',)
+
pip = None
if executable is not None:
executable = os.path.expanduser(executable)
@@ -232,22 +310,39 @@ def _get_pip(module, env=None, executable=None):
pip = executable
else:
# If you define your own executable that executable should be the only candidate.
- candidate_pip_basenames = [executable]
+ # As noted in the docs, executable doesn't work with virtualenvs.
+ candidate_pip_basenames = (executable,)
+
if pip is None:
if env is None:
opt_dirs = []
+ for basename in candidate_pip_basenames:
+ pip = module.get_bin_path(basename, False, opt_dirs)
+ if pip is not None:
+ break
+ else:
+ # For-else: Means that we did not break out of the loop
+ # (therefore, that pip was not found)
+ module.fail_json(msg='Unable to find any of %s to use. pip'
+ ' needs to be installed.' % ', '.join(candidate_pip_basenames))
else:
- # Try pip with the virtualenv directory first.
- opt_dirs = ['%s/bin' % env]
- for basename in candidate_pip_basenames:
- pip = module.get_bin_path(basename, False, opt_dirs)
- if pip is not None:
- break
- # pip should have been found by now. The final call to get_bin_path will
- # trigger fail_json.
- if pip is None:
- basename = candidate_pip_basenames[0]
- pip = module.get_bin_path(basename, True, opt_dirs)
+ # If we're using a virtualenv we must use the pip from the
+ # virtualenv
+ venv_dir = os.path.join(env, 'bin')
+ candidate_pip_basenames = (candidate_pip_basenames[0], 'pip')
+ for basename in candidate_pip_basenames:
+ candidate = os.path.join(venv_dir, basename)
+ if os.path.exists(candidate) and is_executable(candidate):
+ pip = candidate
+ break
+ else:
+ # For-else: Means that we did not break out of the loop
+ # (therefore, that pip was not found)
+ module.fail_json(msg='Unable to find pip in the virtualenv,'
+ ' %s, under any of these names: %s. Make sure pip is'
+ ' present in the virtualenv.' % (env,
+ ', '.join(candidate_pip_basenames)))
+
return pip
@@ -260,6 +355,31 @@ def _fail(module, cmd, out, err):
module.fail_json(cmd=cmd, msg=msg)
+def _get_package_info(module, package, env=None):
+ """This is only needed for special packages which do not show up in pip freeze
+
+ pip and setuptools fall into this category.
+
+ :returns: a string containing the version number if the package is
+ installed. None if the package is not installed.
+ """
+ if env:
+ opt_dirs = ['%s/bin' % env]
+ else:
+ opt_dirs = []
+ python_bin = module.get_bin_path('python', False, opt_dirs)
+
+ if python_bin is None:
+ formatted_dep = None
+ else:
+ rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[package]])
+ if rc:
+ formatted_dep = None
+ else:
+ formatted_dep = '%s==%s' % (package, out.strip())
+ return formatted_dep
+
+
def main():
state_map = dict(
present='install',
@@ -274,9 +394,9 @@ def main():
name=dict(type='list'),
version=dict(type='str'),
requirements=dict(),
- virtualenv=dict(),
+ virtualenv=dict(type='path'),
virtualenv_site_packages=dict(default=False, type='bool'),
- virtualenv_command=dict(default='virtualenv'),
+ virtualenv_command=dict(default='virtualenv', type='path'),
virtualenv_python=dict(type='str'),
use_mirrors=dict(default=True, type='bool'),
extra_args=dict(),
@@ -304,11 +424,10 @@ def main():
umask = int(umask, 8)
except Exception:
module.fail_json(msg="umask must be an octal integer",
- details=str(sys.exc_info()[1]))
-
+ details=to_native(sys.exc_info()[1]))
old_umask = None
- if umask != None:
+ if umask is not None:
old_umask = os.umask(umask)
try:
if state == 'latest' and version is not None:
@@ -316,23 +435,21 @@ def main():
if chdir is None:
# this is done to avoid permissions issues with privilege escalation and virtualenvs
- chdir = tempfile.gettempdir()
+ chdir = tempfile.gettempdir()
err = ''
out = ''
env = module.params['virtualenv']
- virtualenv_command = module.params['virtualenv_command']
if env:
- env = os.path.expanduser(env)
if not os.path.exists(os.path.join(env, 'bin', 'activate')):
if module.check_mode:
module.exit_json(changed=True)
- cmd = os.path.expanduser(virtualenv_command)
+ cmd = module.params['virtualenv_command']
if os.path.basename(cmd) == cmd:
- cmd = module.get_bin_path(virtualenv_command, True)
+ cmd = module.get_bin_path(cmd, True)
if module.params['virtualenv_site_packages']:
cmd += ' --system-site-packages'
@@ -343,6 +460,14 @@ def main():
if virtualenv_python:
cmd += ' -p%s' % virtualenv_python
+ elif PY3:
+ # Ubuntu currently has a patch making virtualenv always
+ # try to use python2. Since Ubuntu16 works without
+ # python2 installed, this is a problem. This code mimics
+ # the upstream behaviour of using the python which invoked
+ # virtualenv to determine which python is used inside of
+ # the virtualenv (when none are specified).
+ cmd += ' -p%s' % sys.executable
cmd = "%s %s" % (cmd, env)
rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)
@@ -393,35 +518,41 @@ def main():
if requirements:
cmd += ' -r %s' % requirements
-
if module.check_mode:
if extra_args or requirements or state == 'latest' or not name:
module.exit_json(changed=True)
elif has_vcs:
module.exit_json(changed=True)
- freeze_cmd = '%s freeze' % pip
-
- rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=chdir)
-
- if rc != 0:
- module.exit_json(changed=True)
+ pkg_cmd, out_pip, err_pip = _get_packages(module, pip, chdir)
out += out_pip
err += err_pip
changed = False
if name:
+ pkg_list = [p for p in out.split('\n') if not p.startswith('You are using') and not p.startswith('You should consider') and p]
+
+ if pkg_cmd.endswith(' freeze') and ('pip' in name or 'setuptools' in name):
+ # Older versions of pip (pre-1.3) do not have pip list.
+ # pip freeze does not list setuptools or pip in its output
+ # So we need to get those via a specialcase
+ for pkg in ('setuptools', 'pip'):
+ if pkg in name:
+ formatted_dep = _get_package_info(module, pkg, env)
+ if formatted_dep is not None:
+ pkg_list.append(formatted_dep)
+ out += '%s\n' % formatted_dep
+
for pkg in name:
- is_present = _is_present(pkg, version, out.split())
+ is_present = _is_present(pkg, version, pkg_list, pkg_cmd)
if (state == 'present' and not is_present) or (state == 'absent' and is_present):
changed = True
break
- module.exit_json(changed=changed, cmd=freeze_cmd, stdout=out, stderr=err)
+ module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err)
if requirements or has_vcs:
- freeze_cmd = '%s freeze' % pip
- out_freeze_before = module.run_command(freeze_cmd, cwd=chdir)[1]
+ _, out_freeze_before, _ = _get_packages(module, pip, chdir)
else:
out_freeze_before = None
@@ -443,17 +574,15 @@ def main():
if out_freeze_before is None:
changed = 'Successfully installed' in out_pip
else:
- out_freeze_after = module.run_command(freeze_cmd, cwd=chdir)[1]
+ _, out_freeze_after, _ = _get_packages(module, pip, chdir)
changed = out_freeze_before != out_freeze_after
module.exit_json(changed=changed, cmd=cmd, name=name, version=version,
state=state, requirements=requirements, virtualenv=env,
stdout=out, stderr=err)
finally:
- if old_umask != None:
+ if old_umask is not None:
os.umask(old_umask)
-# import module snippets
-from ansible.module_utils.basic import *
-
-main()
+if __name__ == '__main__':
+ main()
diff --git a/packaging/os/apt.py b/packaging/os/apt.py
index 596430600d0..5637680cd9a 100644
--- a/packaging/os/apt.py
+++ b/packaging/os/apt.py
@@ -19,6 +19,10 @@
# along with this software. If not, see .
#
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: apt
@@ -47,9 +51,9 @@
choices: [ "yes", "no" ]
cache_valid_time:
description:
- - If C(update_cache) is specified and the last run is less or equal than I(cache_valid_time) seconds ago, the C(update_cache) gets skipped.
+ - Update the apt cache if it is older than the I(cache_valid_time). This option is set in seconds.
required: false
- default: no
+ default: 0
purge:
description:
- Will force purging of configuration files if the module state is set to I(absent).
@@ -116,8 +120,10 @@
required: false
default: false
version_added: "2.1"
-
-requirements: [ python-apt, aptitude ]
+requirements:
+ - python-apt (python 2)
+ - python3-apt (python 3)
+ - aptitude
author: "Matthew Williams (@mgwilliams)"
notes:
- Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) require C(aptitude), otherwise
@@ -125,44 +131,70 @@
'''
EXAMPLES = '''
-# Update repositories cache and install "foo" package
-- apt: name=foo update_cache=yes
-
-# Remove "foo" package
-- apt: name=foo state=absent
-
-# Install the package "foo"
-- apt: name=foo state=present
-
-# Install the version '1.00' of package "foo"
-- apt: name=foo=1.00 state=present
-
-# Update the repository cache and update package "nginx" to latest version using default release squeeze-backport
-- apt: name=nginx state=latest default_release=squeeze-backports update_cache=yes
-
-# Install latest version of "openjdk-6-jdk" ignoring "install-recommends"
-- apt: name=openjdk-6-jdk state=latest install_recommends=no
-
-# Update all packages to the latest version
-- apt: upgrade=dist
-
-# Run the equivalent of "apt-get update" as a separate step
-- apt: update_cache=yes
-
-# Only run "update_cache=yes" if the last one is more than 3600 seconds ago
-- apt: update_cache=yes cache_valid_time=3600
-
-# Pass options to dpkg on run
-- apt: upgrade=dist update_cache=yes dpkg_options='force-confold,force-confdef'
-
-# Install a .deb package
-- apt: deb=/tmp/mypackage.deb
-
-# Install the build dependencies for package "foo"
-- apt: pkg=foo state=build-dep
-
-# Install a .deb package from the internet.
-- apt: deb=https://example.com/python-ppq_0.1-1_all.deb
+- name: Update repositories cache and install "foo" package
+ apt:
+ name: foo
+ update_cache: yes
+
+- name: Remove "foo" package
+ apt:
+ name: foo
+ state: absent
+
+- name: Install the package "foo"
+ apt:
+ name: foo
+ state: present
+
+- name: Install the version '1.00' of package "foo"
+ apt:
+ name: foo=1.00
+ state: present
+
+- name: Update the repository cache and update package "nginx" to latest version using default release squeeze-backport
+ apt:
+ name: nginx
+ state: latest
+ default_release: squeeze-backports
+ update_cache: yes
+
+- name: Install latest version of "openjdk-6-jdk" ignoring "install-recommends"
+ apt:
+ name: openjdk-6-jdk
+ state: latest
+ install_recommends: no
+
+- name: Update all packages to the latest version
+ apt:
+ upgrade: dist
+
+- name: Run the equivalent of "apt-get update" as a separate step
+ apt:
+ update_cache: yes
+
+- name: Only run "update_cache=yes" if the last one is more than 3600 seconds ago
+ apt:
+ update_cache: yes
+ cache_valid_time: 3600
+
+- name: Pass options to dpkg on run
+ apt:
+ upgrade: dist
+ update_cache: yes
+ dpkg_options: 'force-confold,force-confdef'
+
+- name: Install a .deb package
+ apt:
+ deb: /tmp/mypackage.deb
+
+- name: Install the build dependencies for package "foo"
+ apt:
+ pkg: foo
+ state: build-dep
+
+- name: Install a .deb package from the internet.
+ apt:
+ deb: https://example.com/python-ppq_0.1-1_all.deb
'''
RETURN = '''
@@ -188,15 +220,22 @@
sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..."
'''
-import traceback
# added to stave off future warnings about apt api
import warnings
warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning)
-import os
import datetime
import fnmatch
import itertools
+import os
+import re
+import sys
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
# APT related constants
APT_ENV_VARS = dict(
@@ -224,6 +263,12 @@
except ImportError:
HAS_PYTHON_APT = False
+if sys.version_info[0] < 3:
+ PYTHON_APT = 'python-apt'
+else:
+ PYTHON_APT = 'python3-apt'
+
+
def package_split(pkgspec):
parts = pkgspec.split('=', 1)
if len(parts) > 1:
@@ -231,6 +276,7 @@ def package_split(pkgspec):
else:
return parts[0], None
+
def package_versions(pkgname, pkg, pkg_cache):
try:
versions = set(p.version for p in pkg.versions)
@@ -243,17 +289,19 @@ def package_versions(pkgname, pkg, pkg_cache):
return versions
+
def package_version_compare(version, other_version):
try:
return apt_pkg.version_compare(version, other_version)
except AttributeError:
return apt_pkg.VersionCompare(version, other_version)
+
def package_status(m, pkgname, version, cache, state):
try:
# get the package from the cache, as well as the
- # the low-level apt_pkg.Package object which contains
- # state fields not directly acccesible from the
+ # low-level apt_pkg.Package object which contains
+ # state fields not directly accessible from the
# higher-level apt.package.Package object.
pkg = cache[pkgname]
ll_pkg = cache._cache[pkgname] # the low-level package object
@@ -325,6 +373,7 @@ def package_status(m, pkgname, version, cache, state):
return package_is_installed, package_is_upgradable, has_files
+
def expand_dpkg_options(dpkg_options_compressed):
options_list = dpkg_options_compressed.split(',')
dpkg_options = ""
@@ -333,6 +382,7 @@ def expand_dpkg_options(dpkg_options_compressed):
% (dpkg_options, dpkg_option)
return dpkg_options.strip()
+
def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
# Note: apt-get does implicit regex matching when an exact package name
# match is not found. Something like this:
@@ -350,16 +400,19 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
if frozenset('*?[]!').intersection(pkgname_pattern):
# handle multiarch pkgnames, the idea is that "apt*" should
# only select native packages. But "apt*:i386" should still work
- if not ":" in pkgname_pattern:
+ if ":" not in pkgname_pattern:
+ # Filter the multiarch packages from the cache only once
try:
pkg_name_cache = _non_multiarch
except NameError:
- pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if not ':' in pkg.name]
+ pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if ':' not in pkg.name] # noqa: F841
else:
+ # Create a cache of pkg_names including multiarch only once
try:
pkg_name_cache = _all_pkg_names
except NameError:
- pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache]
+ pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache] # noqa: F841
+
matches = fnmatch.filter(pkg_name_cache, pkgname_pattern)
if len(matches) == 0:
@@ -371,8 +424,9 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
new_pkgspec.append(pkgspec_pattern)
return new_pkgspec
+
def parse_diff(output):
- diff = output.splitlines()
+ diff = to_native(output).splitlines()
try:
# check for start marker from aptitude
diff_start = diff.index('Resolving dependencies...')
@@ -385,13 +439,14 @@ def parse_diff(output):
diff_start = -1
try:
# check for end marker line from both apt-get and aptitude
- diff_end = (i for i, item in enumerate(diff) if re.match('[0-9]+ (packages )?upgraded', item)).next()
+ diff_end = next(i for i, item in enumerate(diff) if re.match('[0-9]+ (packages )?upgraded', item))
except StopIteration:
diff_end = len(diff)
diff_start += 1
diff_end += 1
return {'prepared': '\n'.join(diff[diff_start:diff_end])}
+
def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=None, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
@@ -401,12 +456,13 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None,
packages = ""
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
for package in pkgspec:
- name, version = package_split(package)
- installed, upgradable, has_files = package_status(m, name, version, cache, state='install')
if build_dep:
# Let apt decide what to install
pkg_list.append("'%s'" % package)
continue
+
+ name, version = package_split(package)
+ installed, upgradable, has_files = package_status(m, name, version, cache, state='install')
if not installed or (upgrade and upgradable):
pkg_list.append("'%s'" % package)
if installed and upgradable and version:
@@ -469,13 +525,15 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None,
else:
return (True, dict(changed=False))
+
def get_field_of_deb(m, deb_file, field="Version"):
cmd_dpkg = m.get_bin_path("dpkg", True)
cmd = cmd_dpkg + " --field %s %s" % (deb_file, field)
rc, stdout, stderr = m.run_command(cmd)
if rc != 0:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
- return stdout.strip('\n')
+ return to_native(stdout).strip('\n')
+
def install_deb(m, debs, cache, force, install_recommends, allow_unauthenticated, dpkg_options):
changed=False
@@ -551,6 +609,7 @@ def install_deb(m, debs, cache, force, install_recommends, allow_unauthenticated
else:
m.exit_json(changed=changed, stdout=retvals.get('stdout',''), stderr=retvals.get('stderr',''), diff=retvals.get('diff', ''))
+
def remove(m, pkgspec, cache, purge=False, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False):
pkg_list = []
@@ -596,6 +655,7 @@ def remove(m, pkgspec, cache, purge=False, force=False,
m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err)
m.exit_json(changed=True, stdout=out, stderr=err, diff=diff)
+
def upgrade(m, mode="yes", force=False, default_release=None,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
if m.check_mode:
@@ -646,6 +706,7 @@ def upgrade(m, mode="yes", force=False, default_release=None,
m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff)
+
def download(module, deb):
tempdir = os.path.dirname(__file__)
package = os.path.join(tempdir, str(deb.rsplit('/', 1)[1]))
@@ -672,12 +733,63 @@ def download(module, deb):
return deb
+
+def get_cache_mtime():
+ """Return mtime of a valid apt cache file.
+ Stat the apt cache file and if no cache file is found return 0
+ :returns: ``int``
+ """
+ if os.path.exists(APT_UPDATE_SUCCESS_STAMP_PATH):
+ return os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime
+ elif os.path.exists(APT_LISTS_PATH):
+ return os.stat(APT_LISTS_PATH).st_mtime
+ else:
+ return 0
+
+
+def get_updated_cache_time():
+ """Return the mtime time stamp and the updated cache time.
+ Always retrieve the mtime of the apt cache or set the `cache_mtime`
+ variable to 0
+ :returns: ``tuple``
+ """
+ cache_mtime = get_cache_mtime()
+ mtimestamp = datetime.datetime.fromtimestamp(cache_mtime)
+ updated_cache_time = int(time.mktime(mtimestamp.timetuple()))
+ return mtimestamp, updated_cache_time
+
+
+# https://github.com/ansible/ansible-modules-core/issues/2951
+def get_cache(module):
+ '''Attempt to get the cache object and update till it works'''
+ cache = None
+ try:
+ cache = apt.Cache()
+ except SystemError:
+ e = get_exception()
+ if '/var/lib/apt/lists/' in str(e).lower():
+ # update cache until files are fixed or retries exceeded
+ retries = 0
+ while retries < 2:
+ (rc, so, se) = module.run_command(['apt-get', 'update', '-q'])
+ retries += 1
+ if rc == 0:
+ break
+ if rc != 0:
+ module.fail_json(msg='Updating the cache to correct corrupt package lists failed:\n%s\n%s' % (str(e), str(so) + str(se)))
+ # try again
+ cache = apt.Cache()
+ else:
+ module.fail_json(msg=str(e))
+ return cache
+
+
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present', 'build-dep']),
update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
- cache_valid_time = dict(type='int'),
+ cache_valid_time = dict(type='int', default=0),
purge = dict(default=False, type='bool'),
package = dict(default=None, aliases=['pkg', 'name'], type='list'),
deb = dict(default=None, type='path'),
@@ -699,16 +811,18 @@ def main():
if not HAS_PYTHON_APT:
if module.check_mode:
- module.fail_json(msg="python-apt must be installed to use check mode. If run normally this module can autoinstall it")
+ module.fail_json(msg="%s must be installed to use check mode. "
+ "If run normally this module can auto-install it." % PYTHON_APT)
try:
- module.run_command('apt-get update', check_rc=True)
- module.run_command('apt-get install python-apt -y -q', check_rc=True)
+ module.run_command(['apt-get', 'update'], check_rc=True)
+ module.run_command(['apt-get', 'install', PYTHON_APT, '-y', '-q'], check_rc=True)
global apt, apt_pkg
import apt
import apt.debfile
import apt_pkg
except ImportError:
- module.fail_json(msg="Could not import python modules: apt, apt_pkg. Please install python-apt package.")
+ module.fail_json(msg="Could not import python modules: apt, apt_pkg. "
+ "Please install %s package." % PYTHON_APT)
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
@@ -736,8 +850,10 @@ def main():
if p['state'] == 'removed':
p['state'] = 'absent'
+ # Get the cache object
+ cache = get_cache(module)
+
try:
- cache = apt.Cache()
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
@@ -746,47 +862,35 @@ def main():
# reopen cache w/ modified config
cache.open(progress=None)
+ mtimestamp, updated_cache_time = get_updated_cache_time()
+ # Cache valid time is default 0, which will update the cache if
+ # needed and `update_cache` was set to true
+ updated_cache = False
if p['update_cache']:
- # Default is: always update the cache
- cache_valid = False
now = datetime.datetime.now()
- if p.get('cache_valid_time', False):
- try:
- mtime = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime
- except:
- # Looks like the update-success-stamp is not available
- # Fallback: Checking the mtime of the lists
- try:
- mtime = os.stat(APT_LISTS_PATH).st_mtime
- except:
- # No mtime could be read. We update the cache to be safe
- mtime = False
-
- if mtime:
- tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
- mtimestamp = datetime.datetime.fromtimestamp(mtime)
- if mtimestamp + tdelta >= now:
- cache_valid = True
- updated_cache_time = int(time.mktime(mtimestamp.timetuple()))
-
- if cache_valid is not True:
- for retry in xrange(3):
+ tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
+ if not mtimestamp + tdelta >= now:
+ # Retry to update the cache up to 3 times
+ for retry in range(3):
try:
cache.update()
break
except apt.cache.FetchFailedException:
pass
else:
- #out of retries, pass on the exception
- raise
+ module.fail_json(msg='Failed to update apt cache.')
cache.open(progress=None)
updated_cache = True
- updated_cache_time = int(time.mktime(now.timetuple()))
+ mtimestamp, updated_cache_time = get_updated_cache_time()
+
+ # If there is nothing else to do exit. This will set state as
+ # changed based on if the cache was updated.
if not p['package'] and not p['upgrade'] and not p['deb']:
- module.exit_json(changed=False, cache_updated=updated_cache, cache_update_time=updated_cache_time)
- else:
- updated_cache = False
- updated_cache_time = 0
+ module.exit_json(
+ changed=updated_cache,
+ cache_updated=updated_cache,
+ cache_update_time=updated_cache_time
+ )
force_yes = p['force']
@@ -818,16 +922,27 @@ def main():
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
- result = install(module, packages, cache, upgrade=state_upgrade,
- default_release=p['default_release'],
- install_recommends=install_recommends,
- force=force_yes, dpkg_options=dpkg_options,
- build_dep=state_builddep, autoremove=autoremove,
- only_upgrade=p['only_upgrade'],
- allow_unauthenticated=allow_unauthenticated)
- (success, retvals) = result
- retvals['cache_updated']=updated_cache
- retvals['cache_update_time']=updated_cache_time
+
+ success, retvals = install(
+ module,
+ packages,
+ cache,
+ upgrade=state_upgrade,
+ default_release=p['default_release'],
+ install_recommends=install_recommends,
+ force=force_yes,
+ dpkg_options=dpkg_options,
+ build_dep=state_builddep,
+ autoremove=autoremove,
+ only_upgrade=p['only_upgrade'],
+ allow_unauthenticated=allow_unauthenticated
+ )
+
+ # Store if the cache has been updated
+ retvals['cache_updated'] = updated_cache
+ # Store when the update time was last
+ retvals['cache_update_time'] = updated_cache_time
+
if success:
module.exit_json(**retvals)
else:
@@ -840,9 +955,6 @@ def main():
except apt.cache.FetchFailedException:
module.fail_json(msg="Could not fetch updated apt files")
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
if __name__ == "__main__":
main()
diff --git a/packaging/os/apt_key.py b/packaging/os/apt_key.py
index ebcdc3aa4b9..a5fd723f6c3 100644
--- a/packaging/os/apt_key.py
+++ b/packaging/os/apt_key.py
@@ -19,6 +19,10 @@
# You should have received a copy of the GNU General Public License
 # along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: apt_key
@@ -37,16 +41,17 @@
default: none
description:
- identifier of key. Including this allows check mode to correctly report the changed state.
+ - "If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead."
data:
required: false
default: none
description:
- - keyfile contents
+ - keyfile contents to add to the keyring
file:
required: false
default: none
description:
- - keyfile path
+ - path to a keyfile on the remote server to add to the keyring
keyring:
required: false
default: none
@@ -82,55 +87,118 @@
EXAMPLES = '''
# Add an apt key by id from a keyserver
-- apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
+- apt_key:
+ keyserver: keyserver.ubuntu.com
+ id: 36A1D7869245C8950F966E92D8576A8BA88D21E9
# Add an Apt signing key, uses whichever key is at the URL
-- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present
+- apt_key:
+ url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc"
+ state: present
# Add an Apt signing key, will not download if present
-- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present
+- apt_key:
+ id: 473041FA
+ url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc"
+ state: present
# Remove an Apt signing key, uses whichever key is at the URL
-- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=absent
+- apt_key:
+ url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc"
+ state: absent
# Remove a Apt specific signing key, leading 0x is valid
-- apt_key: id=0x473041FA state=absent
+- apt_key:
+ id: 0x473041FA
+ state: absent
# Add a key from a file on the Ansible server
-- apt_key: data="{{ lookup('file', 'apt.gpg') }}" state=present
+- apt_key:
+ data: "{{ lookup('file', 'apt.gpg') }}"
+ state: present
# Add an Apt signing key to a specific keyring file
-- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc keyring=/etc/apt/trusted.gpg.d/debian.gpg state=present
+- apt_key:
+ id: 473041FA
+ url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc"
+ keyring: /etc/apt/trusted.gpg.d/debian.gpg
+
+# Add Apt signing key on remote server to keyring
+- apt_key:
+ id: 473041FA
+ file: /tmp/apt.gpg
+ state: present
'''
# FIXME: standardize into module_common
from traceback import format_exc
-from re import compile as re_compile
-# FIXME: standardize into module_common
-from distutils.spawn import find_executable
-from os import environ
-from sys import exc_info
-import traceback
-match_key = re_compile("^gpg:.*key ([0-9a-fA-F]+):.*$")
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+apt_key_bin = None
+
+
+def find_needed_binaries(module):
+ global apt_key_bin
+
+ apt_key_bin = module.get_bin_path('apt-key', required=True)
+
+ ### FIXME: Is there a reason that gpg and grep are checked? Is it just
+ # cruft or does the apt .deb package not require them (and if they're not
+ # installed, /usr/bin/apt-key fails?)
+ module.get_bin_path('gpg', required=True)
+ module.get_bin_path('grep', required=True)
+
+
+def parse_key_id(key_id):
+ """validate the key_id and break it into segments
+
+ :arg key_id: The key_id as supplied by the user. A valid key_id will be
+ 8, 16, or more hexadecimal chars with an optional leading ``0x``.
+ :returns: The portion of key_id suitable for apt-key del, the portion
+ suitable for comparisons with --list-public-keys, and the portion that
+ can be used with --recv-key. If key_id is long enough, these will be
+ the last 8 characters of key_id, the last 16 characters, and all of
+ key_id. If key_id is not long enough, some of the values will be the
+ same.
+
+ * apt-key del <= 1.10 has a bug with key_id != 8 chars
+ * apt-key adv --list-public-keys prints 16 chars
+ * apt-key adv --recv-key can take more chars
+
+ """
+ # Make sure the key_id is valid hexadecimal
+ int(key_id, 16)
+
+ key_id = key_id.upper()
+ if key_id.startswith('0X'):
+ key_id = key_id[2:]
+
+ key_id_len = len(key_id)
+ if (key_id_len != 8 and key_id_len != 16) and key_id_len <= 16:
+ raise ValueError('key_id must be 8, 16, or 16+ hexadecimal characters in length')
+
+ short_key_id = key_id[-8:]
-REQUIRED_EXECUTABLES=['gpg', 'grep', 'apt-key']
+ fingerprint = key_id
+ if key_id_len > 16:
+ fingerprint = key_id[-16:]
+ return short_key_id, fingerprint, key_id
-def check_missing_binaries(module):
- missing = [e for e in REQUIRED_EXECUTABLES if not find_executable(e)]
- if len(missing):
- module.fail_json(msg="binaries are missing", names=missing)
def all_keys(module, keyring, short_format):
if keyring:
- cmd = "apt-key --keyring %s adv --list-public-keys --keyid-format=long" % keyring
+ cmd = "%s --keyring %s adv --list-public-keys --keyid-format=long" % (apt_key_bin, keyring)
else:
- cmd = "apt-key adv --list-public-keys --keyid-format=long"
+ cmd = "%s adv --list-public-keys --keyid-format=long" % apt_key_bin
(rc, out, err) = module.run_command(cmd)
results = []
- lines = out.split('\n')
+ lines = to_native(out).split('\n')
for line in lines:
if line.startswith("pub") or line.startswith("sub"):
tokens = line.split()
@@ -141,6 +209,7 @@ def all_keys(module, keyring, short_format):
results = shorten_key_ids(results)
return results
+
def shorten_key_ids(key_id_list):
"""
Takes a list of key ids, and converts them to the 'short' format,
@@ -151,6 +220,7 @@ def shorten_key_ids(key_id_list):
short.append(key[-8:])
return short
+
def download_key(module, url):
# FIXME: move get_url code to common, allow for in-memory D/L, support proxies
# and reuse here
@@ -166,59 +236,69 @@ def download_key(module, url):
except Exception:
module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
+
def import_key(module, keyring, keyserver, key_id):
if keyring:
- cmd = "apt-key --keyring %s adv --keyserver %s --recv %s" % (keyring, keyserver, key_id)
+ cmd = "%s --keyring %s adv --keyserver %s --recv %s" % (apt_key_bin, keyring, keyserver, key_id)
else:
- cmd = "apt-key adv --keyserver %s --recv %s" % (keyserver, key_id)
- for retry in xrange(5):
- (rc, out, err) = module.run_command(cmd)
+ cmd = "%s adv --keyserver %s --recv %s" % (apt_key_bin, keyserver, key_id)
+ for retry in range(5):
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ (rc, out, err) = module.run_command(cmd, environ_update=lang_env)
if rc == 0:
break
else:
# Out of retries
- module.fail_json(cmd=cmd, msg="error fetching key from keyserver: %s" % keyserver,
- rc=rc, stdout=out, stderr=err)
+ if rc == 2 and 'not found on keyserver' in out:
+ msg = 'Key %s not found on keyserver %s' % (key_id, keyserver)
+ module.fail_json(cmd=cmd, msg=msg)
+ else:
+ msg = "Error fetching key %s from keyserver: %s" % (key_id, keyserver)
+ module.fail_json(cmd=cmd, msg=msg, rc=rc, stdout=out, stderr=err)
return True
+
def add_key(module, keyfile, keyring, data=None):
if data is not None:
if keyring:
- cmd = "apt-key --keyring %s add -" % keyring
+ cmd = "%s --keyring %s add -" % (apt_key_bin, keyring)
else:
- cmd = "apt-key add -"
+ cmd = "%s add -" % apt_key_bin
(rc, out, err) = module.run_command(cmd, data=data, check_rc=True, binary_data=True)
else:
if keyring:
- cmd = "apt-key --keyring %s add %s" % (keyring, keyfile)
+ cmd = "%s --keyring %s add %s" % (apt_key_bin, keyring, keyfile)
else:
- cmd = "apt-key add %s" % (keyfile)
+ cmd = "%s add %s" % (apt_key_bin, keyfile)
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
+
def remove_key(module, key_id, keyring):
# FIXME: use module.run_command, fail at point of error and don't discard useful stdin/stdout
if keyring:
- cmd = 'apt-key --keyring %s del %s' % (keyring, key_id)
+ cmd = '%s --keyring %s del %s' % (apt_key_bin, keyring, key_id)
else:
- cmd = 'apt-key del %s' % key_id
+ cmd = '%s del %s' % (apt_key_bin, key_id)
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
+
def main():
module = AnsibleModule(
argument_spec=dict(
id=dict(required=False, default=None),
url=dict(required=False),
data=dict(required=False),
- file=dict(required=False),
+ file=dict(required=False, type='path'),
key=dict(required=False),
- keyring=dict(required=False),
+ keyring=dict(required=False, type='path'),
validate_certs=dict(default='yes', type='bool'),
keyserver=dict(required=False),
state=dict(required=False, choices=['present', 'absent'], default='present')
),
- supports_check_mode=True
+ supports_check_mode=True,
+ mutually_exclusive=(('filename', 'keyserver', 'data', 'url'),),
)
key_id = module.params['id']
@@ -230,64 +310,70 @@ def main():
keyserver = module.params['keyserver']
changed = False
- # we use the "short" id: key_id[-8:], short_format=True
- # it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871
-
+ fingerprint = short_key_id = key_id
+ short_format = False
if key_id:
try:
- _ = int(key_id, 16)
- if key_id.startswith('0x'):
- key_id = key_id[2:]
- key_id = key_id.upper()[-8:]
+ short_key_id, fingerprint, key_id = parse_key_id(key_id)
except ValueError:
- module.fail_json(msg="Invalid key_id", id=key_id)
+ module.fail_json(msg='Invalid key_id', id=key_id)
+
+ if len(fingerprint) == 8:
+ short_format = True
- # FIXME: I think we have a common facility for this, if not, want
- check_missing_binaries(module)
+ find_needed_binaries(module)
- short_format = True
keys = all_keys(module, keyring, short_format)
return_values = {}
if state == 'present':
- if key_id and key_id in keys:
+ if fingerprint and fingerprint in keys:
module.exit_json(changed=False)
+ elif fingerprint and fingerprint not in keys and module.check_mode:
+ ### TODO: Someday we could go further -- write keys out to
+ # a temporary file and then extract the key id from there via gpg
+ # to decide if the key is installed or not.
+ module.exit_json(changed=True)
else:
if not filename and not data and not keyserver:
data = download_key(module, url)
- if key_id and key_id in keys:
- module.exit_json(changed=False)
+
+ if filename:
+ add_key(module, filename, keyring)
+ elif keyserver:
+ import_key(module, keyring, keyserver, key_id)
else:
- if module.check_mode:
- module.exit_json(changed=True)
- if filename:
- add_key(module, filename, keyring)
- elif keyserver:
- import_key(module, keyring, keyserver, key_id)
- else:
- add_key(module, "-", keyring, data)
- changed=False
- keys2 = all_keys(module, keyring, short_format)
- if len(keys) != len(keys2):
- changed=True
- if key_id and not key_id in keys2:
- module.fail_json(msg="key does not seem to have been added", id=key_id)
- module.exit_json(changed=changed)
+ add_key(module, "-", keyring, data)
+
+ changed = False
+ keys2 = all_keys(module, keyring, short_format)
+ if len(keys) != len(keys2):
+ changed=True
+
+ if fingerprint and fingerprint not in keys2:
+ module.fail_json(msg="key does not seem to have been added", id=key_id)
+ module.exit_json(changed=changed)
+
elif state == 'absent':
if not key_id:
module.fail_json(msg="key is required")
- if key_id in keys:
+ if fingerprint in keys:
if module.check_mode:
module.exit_json(changed=True)
- if remove_key(module, key_id, keyring):
- changed=True
+
+ # we use the "short" id: key_id[-8:], short_format=True
+ # it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871
+ if remove_key(module, short_key_id, keyring):
+ keys = all_keys(module, keyring, short_format)
+ if fingerprint in keys:
+ module.fail_json(msg="apt-key del did not return an error but the key was not removed (check that the id is correct and *not* a subkey)", id=key_id)
+ changed = True
else:
- # FIXME: module.fail_json or exit-json immediately at point of failure
+ # FIXME: module.fail_json or exit-json immediately at point of failure
module.fail_json(msg="error removing key_id", **return_values)
module.exit_json(changed=changed, **return_values)
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py
index 1a19c12e47a..dc60a2a722f 100644
--- a/packaging/os/apt_repository.py
+++ b/packaging/os/apt_repository.py
@@ -21,6 +21,10 @@
 # along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: apt_repository
@@ -28,9 +32,8 @@
description:
- Add or remove an APT repositories in Ubuntu and Debian.
notes:
- - This module works on Debian and Ubuntu and requires C(python-apt).
+ - This module works on Debian, Ubuntu and their derivatives.
- This module supports Debian Squeeze (version 6) as well as its successors.
- - This module treats Debian and Ubuntu distributions separately. So PPA could be installed only on Ubuntu machines.
options:
repo:
required: true
@@ -70,32 +73,57 @@
Defaults to a file name based on the repository source url.
The .list extension will be automatically added.
required: false
+ codename:
+ version_added: '2.3'
+ description:
+ - Override the distribution codename to use for PPA repositories.
+ Should usually only be set when working with a PPA on a non-Ubuntu target (e.g. Debian or Mint)
+ required: false
author: "Alexander Saltanov (@sashka)"
version_added: "0.7"
-requirements: [ python-apt ]
+requirements:
+ - python-apt (python 2)
+ - python3-apt (python 3)
'''
EXAMPLES = '''
# Add specified repository into sources list.
-apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=present
+- apt_repository:
+ repo: deb http://archive.canonical.com/ubuntu hardy partner
+ state: present
# Add specified repository into sources list using specified filename.
-apt_repository: repo='deb http://dl.google.com/linux/chrome/deb/ stable main' state=present filename='google-chrome'
+- apt_repository:
+ repo: deb http://dl.google.com/linux/chrome/deb/ stable main
+ state: present
+ filename: 'google-chrome'
# Add source repository into sources list.
-apt_repository: repo='deb-src http://archive.canonical.com/ubuntu hardy partner' state=present
+- apt_repository:
+ repo: deb-src http://archive.canonical.com/ubuntu hardy partner
+ state: present
# Remove specified repository from sources list.
-apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=absent
-
-# On Ubuntu target: add nginx stable repository from PPA and install its signing key.
-# On Debian target: adding PPA is not available, so it will fail immediately.
-apt_repository: repo='ppa:nginx/stable'
+- apt_repository:
+ repo: deb http://archive.canonical.com/ubuntu hardy partner
+ state: absent
+
+# Add nginx stable repository from PPA and install its signing key.
+# On Ubuntu target:
+- apt_repository:
+ repo: 'ppa:nginx/stable'
+
+# On Debian target
+- apt_repository:
+ repo: 'ppa:nginx/stable'
+ codename: 'trusty'
+    # codename overrides the detected release so the PPA URL resolves on Debian
'''
import glob
import os
import re
+import sys
import tempfile
try:
@@ -108,10 +136,16 @@
distro = None
HAVE_PYTHON_APT = False
+if sys.version_info[0] < 3:
+ PYTHON_APT = 'python-apt'
+else:
+ PYTHON_APT = 'python3-apt'
+
DEFAULT_SOURCES_PERM = int('0644', 8)
VALID_SOURCE_TYPES = ('deb', 'deb-src')
+
def install_python_apt(module):
if not module.check_mode:
@@ -119,8 +153,8 @@ def install_python_apt(module):
if apt_get_path:
rc, so, se = module.run_command([apt_get_path, 'update'])
if rc != 0:
- module.fail_json(msg="Failed to auto-install python-apt. Error was: '%s'" % se.strip())
- rc, so, se = module.run_command([apt_get_path, 'install', 'python-apt', '-y', '-q'])
+ module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (PYTHON_APT, se.strip()))
+ rc, so, se = module.run_command([apt_get_path, 'install', PYTHON_APT, '-y', '-q'])
if rc == 0:
global apt, apt_pkg, aptsources_distro, distro, HAVE_PYTHON_APT
import apt
@@ -129,9 +163,10 @@ def install_python_apt(module):
distro = aptsources_distro.get_distro()
HAVE_PYTHON_APT = True
else:
- module.fail_json(msg="Failed to auto-install python-apt. Error was: '%s'" % se.strip())
+ module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (PYTHON_APT, se.strip()))
else:
- module.fail_json(msg="python-apt must be installed to use check mode")
+ module.fail_json(msg="%s must be installed to use check mode" % PYTHON_APT)
+
class InvalidSource(Exception):
pass
@@ -255,7 +290,7 @@ def load(self, file):
self.files[file] = group
def save(self):
- for filename, sources in self.files.items():
+ for filename, sources in list(self.files.items()):
if sources:
d, fn = os.path.split(filename)
fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d)
@@ -365,6 +400,7 @@ class UbuntuSourcesList(SourcesList):
def __init__(self, module, add_ppa_signing_keys_callback=None):
self.module = module
self.add_ppa_signing_keys_callback = add_ppa_signing_keys_callback
+ self.codename = module.params['codename'] or distro.codename
super(UbuntuSourcesList, self).__init__(module)
def _get_ppa_info(self, owner_name, ppa_name):
@@ -374,7 +410,7 @@ def _get_ppa_info(self, owner_name, ppa_name):
response, info = fetch_url(self.module, lp_api, headers=headers)
if info['status'] != 200:
self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg'])
- return json.load(response)
+ return json.loads(to_native(response.read()))
def _expand_ppa(self, path):
ppa = path.split(':')[1]
@@ -384,7 +420,7 @@ def _expand_ppa(self, path):
except IndexError:
ppa_name = 'ppa'
- line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, distro.codename)
+ line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, self.codename)
return line, ppa_owner, ppa_name
def _key_already_exists(self, key_fingerprint):
@@ -405,7 +441,7 @@ def add_source(self, line, comment='', file=None):
command = ['apt-key', 'adv', '--recv-keys', '--keyserver', 'hkp://keyserver.ubuntu.com:80', info['signing_key_fingerprint']]
self.add_ppa_signing_keys_callback(command)
- file = file or self._suggest_filename('%s_%s' % (line, distro.codename))
+ file = file or self._suggest_filename('%s_%s' % (line, self.codename))
else:
source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
file = file or self._suggest_filename(source)
@@ -459,6 +495,7 @@ def main():
# this should not be needed, but exists as a failsafe
install_python_apt=dict(required=False, default="yes", type='bool'),
validate_certs = dict(default='yes', type='bool'),
+ codename = dict(required=False),
),
supports_check_mode=True,
)
@@ -475,15 +512,13 @@ def main():
if params['install_python_apt']:
install_python_apt(module)
else:
- module.fail_json(msg='python-apt is not installed, and install_python_apt is False')
+ module.fail_json(msg='%s is not installed, and install_python_apt is False' % PYTHON_APT)
- if isinstance(distro, aptsources_distro.UbuntuDistribution):
+ if isinstance(distro, aptsources_distro.Distribution):
sourceslist = UbuntuSourcesList(module,
add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module))
- elif isinstance(distro, aptsources_distro.Distribution):
- sourceslist = SourcesList(module)
else:
- module.fail_json(msg='Module apt_repository supports only Debian and Ubuntu.')
+ module.fail_json(msg='Module apt_repository is not supported on target.')
sources_before = sourceslist.dump()
@@ -525,4 +560,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/packaging/os/apt_rpm.py b/packaging/os/apt_rpm.py
old mode 100644
new mode 100755
index fec220e0512..47f6b194cf1
--- a/packaging/os/apt_rpm.py
+++ b/packaging/os/apt_rpm.py
@@ -19,6 +19,10 @@
 # along with this software.  If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: apt_rpm
@@ -50,13 +54,25 @@
EXAMPLES = '''
# install package foo
-- apt_rpm: pkg=foo state=present
+- apt_rpm:
+ pkg: foo
+ state: present
+
# remove package foo
-- apt_rpm: pkg=foo state=absent
-# description: remove packages foo and bar
-- apt_rpm: pkg=foo,bar state=absent
-# description: update the package database and install bar (bar will be the updated if a newer version exists)
-- apt_rpm: name=bar state=present update_cache=yes
+- apt_rpm:
+ pkg: foo
+ state: absent
+
+# description: remove packages foo and bar
+- apt_rpm:
+ pkg: foo,bar
+ state: absent
+
+# description: update the package database and install bar (bar will be upgraded if a newer version exists)
+- apt_rpm:
+ name: bar
+ state: present
+ update_cache: yes
'''
@@ -75,7 +91,7 @@
def query_package(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
- rc = os.system("%s -q %s" % (RPM_PATH,name))
+ rc, out, err = module.run_command("%s -q %s" % (RPM_PATH,name))
if rc == 0:
return True
else:
@@ -84,14 +100,14 @@ def query_package(module, name):
def query_package_provides(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
- rc = os.system("%s -q --provides %s >/dev/null" % (RPM_PATH,name))
+ rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH,name))
return rc == 0
def update_package_db(module):
- rc = os.system("%s update" % APT_PATH)
+ rc, out, err = module.run_command("%s update" % APT_PATH)
if rc != 0:
- module.fail_json(msg="could not update package db")
+ module.fail_json(msg="could not update package db: %s" % err)
def remove_packages(module, packages):
@@ -102,10 +118,10 @@ def remove_packages(module, packages):
if not query_package(module, package):
continue
- rc = os.system("%s -y remove %s > /dev/null" % (APT_PATH,package))
+ rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH,package))
if rc != 0:
- module.fail_json(msg="failed to remove %s" % (package))
+ module.fail_json(msg="failed to remove %s: %s" % (package, err))
remove_c += 1
@@ -124,9 +140,7 @@ def install_packages(module, pkgspec):
if len(packages) != 0:
- cmd = ("%s -y install %s > /dev/null" % (APT_PATH, packages))
-
- rc, out, err = module.run_command(cmd)
+ rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
installed = True
for packages in pkgspec:
@@ -169,4 +183,5 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/packaging/os/package.py b/packaging/os/package.py
index 2ae7c7fbc8e..85712b6d903 100644
--- a/packaging/os/package.py
+++ b/packaging/os/package.py
@@ -19,6 +19,10 @@
 # along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: package
@@ -53,9 +57,13 @@
'''
EXAMPLES = '''
- name: install the latest version of ntpdate
- package: name=ntpdate state=latest
+ package:
+ name: ntpdate
+ state: latest
# This uses a variable as this changes per distribution.
- name: remove the apache package
- package: name={{apache}} state=absent
+ package:
+ name: "{{ apache }}"
+ state: absent
'''
diff --git a/packaging/os/redhat_subscription.py b/packaging/os/redhat_subscription.py
index f1440772500..0b56c6eafbf 100644
--- a/packaging/os/redhat_subscription.py
+++ b/packaging/os/redhat_subscription.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
 # along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: redhat_subscription
@@ -71,7 +75,7 @@
default: null
org_id:
description:
- - Organisation ID to use in conjunction with activationkey
+ - Organization ID to use in conjunction with activationkey
required: False
default: null
version_added: "2.0"
@@ -114,30 +118,41 @@
EXAMPLES = '''
# Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
-- redhat_subscription: state=present username=joe_user password=somepass autosubscribe=true
+- redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ autosubscribe: true
# Same as above but with pulling existing system data.
-- redhat_subscription: state=present username=joe_user password=somepass
- consumer_id=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+- redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
# Register with activationkey (1-222333444) and consume subscriptions matching
# the names (Red hat Enterprise Server) and (Red Hat Virtualization)
-- redhat_subscription: state=present
- activationkey=1-222333444
- pool='^(Red Hat Enterprise Server|Red Hat Virtualization)$'
+- redhat_subscription:
+ state: present
+ activationkey: 1-222333444
+ pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'
# Update the consumed subscriptions from the previous example (remove the Red
# Hat Virtualization subscription)
-- redhat_subscription: state=present
- activationkey=1-222333444
- pool='^Red Hat Enterprise Server$'
+- redhat_subscription:
+ state: present
+ activationkey: 1-222333444
+ pool: '^Red Hat Enterprise Server$'
# Register as user credentials into given environment (against Red Hat
# Satellite 6.x), and auto-subscribe to available content.
-- redhat_subscription: state=present
- username=joe_user password=somepass
- environment=Library
- autosubscribe=true
+- redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ environment: Library
+ autosubscribe: yes
'''
import os
@@ -541,4 +556,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/packaging/os/rhn_channel.py b/packaging/os/rhn_channel.py
index 0071183158e..c2e87f1bd91 100644
--- a/packaging/os/rhn_channel.py
+++ b/packaging/os/rhn_channel.py
@@ -17,6 +17,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rhn_channel
@@ -26,7 +30,7 @@
version_added: "1.1"
author: "Vincent Van der Kussen (@vincentvdk)"
notes:
- - this module fetches the system id from RHN.
+ - this module fetches the system id from RHN.
requirements:
- none
options:
@@ -46,7 +50,7 @@
required: false
default: present
url:
- description:
+ description:
- The full url to the RHN/Satellite api
required: true
user:
@@ -60,7 +64,12 @@
'''
EXAMPLES = '''
-- rhn_channel: name=rhel-x86_64-server-v2vwin-6 sysname=server01 url=https://rhn.redhat.com/rpc/api user=rhnuser password=guessme
+- rhn_channel:
+ name: rhel-x86_64-server-v2vwin-6
+ sysname: server01
+ url: https://rhn.redhat.com/rpc/api
+ user: rhnuser
+ password: guessme
'''
import xmlrpclib
@@ -166,5 +175,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/packaging/os/rhn_register.py b/packaging/os/rhn_register.py
index f30cf09084d..c228f0b1b7a 100644
--- a/packaging/os/rhn_register.py
+++ b/packaging/os/rhn_register.py
@@ -17,6 +17,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rhn_register
@@ -83,30 +87,44 @@
EXAMPLES = '''
# Unregister system from RHN.
-- rhn_register: state=absent username=joe_user password=somepass
+- rhn_register:
+ state: absent
+ username: joe_user
+ password: somepass
# Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
-- rhn_register: state=present username=joe_user password=somepass
+- rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
# Register with activationkey (1-222333444) and enable extended update support.
-- rhn_register: state=present activationkey=1-222333444 enable_eus=true
+- rhn_register:
+ state: present
+ activationkey: 1-222333444
+ enable_eus: true
# Register with activationkey (1-222333444) and set a profilename which may differ from the hostname.
-- rhn_register: state=present activationkey=1-222333444 profilename=host.example.com.custom
+- rhn_register:
+ state: present
+ activationkey: 1-222333444
+ profilename: host.example.com.custom
# Register as user (joe_user) with password (somepass) against a satellite
# server specified by (server_url).
-- rhn_register: >
- state=present
- username=joe_user
- password=somepass
- server_url=https://xmlrpc.my.satellite/XMLRPC
+- rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+ server_url: https://xmlrpc.my.satellite/XMLRPC
# Register as user (joe_user) with password (somepass) and enable
# channels (rhel-x86_64-server-6-foo-1) and (rhel-x86_64-server-6-bar-1).
-- rhn_register: state=present username=joe_user
- password=somepass
- channels=rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
+- rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+ channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
'''
import sys
@@ -147,9 +165,10 @@ def load_config(self):
# configuration. Yeah, I know this should be subclassed ... but, oh
# well
def get_option_default(self, key, default=''):
- # ignore pep8 W601 errors for this line
- # setting this to use 'in' does not work in the rhn library
- if self.has_key(key):
+ # the class in rhn-client-tools that this comes from didn't
+ # implement __contains__() until 2.5.x. That's why we check if
+ # the key is present in the dictionary that is the actual storage
+ if key in self.dict:
return self[key]
else:
return default
@@ -407,4 +426,5 @@ def main():
module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
-main()
+if __name__ == '__main__':
+ main()
diff --git a/packaging/os/rpm_key.py b/packaging/os/rpm_key.py
index b4d359658eb..9cb058c56aa 100644
--- a/packaging/os/rpm_key.py
+++ b/packaging/os/rpm_key.py
@@ -19,6 +19,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: rpm_key
@@ -39,7 +43,7 @@
default: "present"
choices: [present, absent]
description:
- - Wheather the key will be imported or removed from the rpm db.
+ - If the key will be imported or removed from the rpm db.
validate_certs:
description:
- If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used
@@ -52,13 +56,19 @@
EXAMPLES = '''
# Example action to import a key from a url
-- rpm_key: state=present key=http://apt.sw.be/RPM-GPG-KEY.dag.txt
+- rpm_key:
+ state: present
+ key: http://apt.sw.be/RPM-GPG-KEY.dag.txt
# Example action to import a key from a file
-- rpm_key: state=present key=/path/to/key.gpg
+- rpm_key:
+ state: present
+ key: /path/to/key.gpg
# Example action to ensure a key is not present in the db
-- rpm_key: state=absent key=DEADB33F
+- rpm_key:
+ state: absent
+ key: DEADB33F
'''
import re
import os.path
diff --git a/packaging/os/yum.py b/packaging/os/yum.py
index cc0ad419f2f..18e71713902 100644
--- a/packaging/os/yum.py
+++ b/packaging/os/yum.py
@@ -36,6 +36,10 @@
except:
transaction_helpers = False
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: yum
@@ -156,31 +160,50 @@
EXAMPLES = '''
- name: install the latest version of Apache
- yum: name=httpd state=latest
+ yum:
+ name: httpd
+ state: latest
- name: remove the Apache package
- yum: name=httpd state=absent
+ yum:
+ name: httpd
+ state: absent
- name: install the latest version of Apache from the testing repo
- yum: name=httpd enablerepo=testing state=present
+ yum:
+ name: httpd
+ enablerepo: testing
+ state: present
- name: install one specific version of Apache
- yum: name=httpd-2.2.29-1.4.amzn1 state=present
+ yum:
+ name: httpd-2.2.29-1.4.amzn1
+ state: present
- name: upgrade all packages
- yum: name=* state=latest
+ yum:
+ name: '*'
+ state: latest
- name: install the nginx rpm from a remote repo
- yum: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
+ yum:
+ name: http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ state: present
- name: install nginx rpm from a local file
- yum: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present
+ yum:
+ name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ state: present
- name: install the 'Development tools' package group
- yum: name="@Development tools" state=present
+ yum:
+ name: "@Development tools"
+ state: present
- name: install the 'Gnome desktop' environment group
- yum: name="@^gnome-desktop-environment" state=present
+ yum:
+ name: "@^gnome-desktop-environment"
+ state: present
'''
# 64k. Number of bytes to read at a time when manually downloading pkgs via a url
@@ -225,6 +248,8 @@ def fetch_rpm_from_url(spec, module=None):
package = os.path.join(tempdir, str(spec.rsplit('/', 1)[1]))
try:
rsp, info = fetch_url(module, spec)
+ if not rsp:
+ module.fail_json(msg="Failure downloading %s, %s" % (spec, info['msg']))
f = open(package, 'w')
data = rsp.read(BUFSIZE)
while data:
@@ -507,24 +532,11 @@ def local_nvra(module, path):
finally:
os.close(fd)
- return '%s-%s-%s.%s' % (header[rpm.RPMTAG_NAME],
+ return '%s-%s-%s.%s' % (header[rpm.RPMTAG_NAME],
header[rpm.RPMTAG_VERSION],
header[rpm.RPMTAG_RELEASE],
header[rpm.RPMTAG_ARCH])
-
-def local_name(module, path):
- """return package name of a local rpm passed in"""
-
- ts = rpm.TransactionSet()
- ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
- fd = os.open(path, os.O_RDONLY)
- try:
- header = ts.hdrFromFdno(fd)
- finally:
- os.close(fd)
- return header[rpm.RPMTAG_NAME]
-
def pkg_to_dict(pkgstr):
if pkgstr.strip():
@@ -596,13 +608,15 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
if spec.endswith('.rpm') and '://' not in spec:
# get the pkg name-v-r.arch
if not os.path.exists(spec):
- res['msg'] += "No Package file matching '%s' found on system" % spec
+ res['msg'] += "No RPM file matching '%s' found on system" % spec
+ res['results'].append("No RPM file matching '%s' found on system" % spec)
+ res['rc'] = 127 # Ensure the task fails in with-loop
module.fail_json(**res)
- pkg_name = local_name(module, spec)
+ nvra = local_nvra(module, spec)
# look for them in the rpmdb
- if is_installed(module, repoq, pkg_name, conf_file, en_repos=en_repos, dis_repos=dis_repos):
+ if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos):
# if they are there, skip it
continue
pkg = spec
@@ -611,8 +625,8 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
elif '://' in spec:
# download package so that we can check if it's already installed
package = fetch_rpm_from_url(spec, module=module)
- pkg_name = local_name(module, package)
- if is_installed(module, repoq, pkg_name, conf_file, en_repos=en_repos, dis_repos=dis_repos):
+ nvra = local_nvra(module, package)
+ if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos):
# if it's there, skip it
continue
pkg = package
@@ -632,11 +646,13 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
if installed_pkgs:
res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec))
continue
-
+
# look up what pkgs provide this
pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
if not pkglist:
- res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
+ res['msg'] += "No package matching '%s' found available, installed or updated" % spec
+ res['results'].append("No package matching '%s' found available, installed or updated" % spec)
+ res['rc'] = 126 # Ensure the task fails in with-loop
module.fail_json(**res)
# if any of the packages are involved in a transaction, fail now
@@ -644,6 +660,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
conflicts = transaction_exists(pkglist)
if len(conflicts) > 0:
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
+ res['rc'] = 125 # Ensure the task fails in with-loop
module.fail_json(**res)
# if any of them are installed
@@ -698,8 +715,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
for spec in items:
# Fail on invalid urls:
if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
- err = 'Package at %s could not be installed' % spec
- module.fail_json(changed=False,msg=err,rc=1)
+ module.fail_json(msg='Package at %s could not be installed' % spec, rc=1, changed=False)
if (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out:
# avoid failing in the 'Nothing To Do' case
# this may happen with an URL spec.
@@ -771,7 +787,7 @@ def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
# of the process
# at this point we should check to see if the pkg is no longer present
-
+
for pkg in pkgs:
if not pkg.startswith('@'): # we can't sensibly check for a group being uninstalled reliably
# look to see if the pkg shows up from is_installed. If it doesn't
@@ -809,6 +825,8 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
res['results'].append('Nothing to do here, all packages are up to date')
return res
elif rc == 100:
+ # remove incorrect new lines in longer columns in output from yum check-update
+ out=re.sub('\n\W+', ' ', out)
available_updates = out.split('\n')
# build update dictionary
for line in available_updates:
@@ -837,6 +855,7 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
# some guess work involved with groups. update @ will install the group if missing
if spec.startswith('@'):
pkgs['update'].append(spec)
+ will_update.add(spec)
continue
# dep/pkgname - find it
else:
@@ -847,7 +866,9 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
# FIXME..? may not be desirable to throw an exception here if a single package is missing
if not pkglist:
- res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
+ res['msg'] += "No package matching '%s' found available, installed or updated" % spec
+ res['results'].append("No package matching '%s' found available, installed or updated" % spec)
+ res['rc'] = 126 # Ensure the task fails in with-loop
module.fail_json(**res)
nothing_to_do = True
@@ -860,7 +881,7 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
# or virtual provides (like "python-*" or "smtp-daemon") while
# updates contains name only.
this_name_only = '-'.join(this.split('-')[:-2])
- if spec in pkgs['update'] and this_name_only in updates.keys():
+ if spec in pkgs['update'] and this_name_only in updates:
nothing_to_do = False
will_update.add(spec)
# Massage the updates list
@@ -879,6 +900,8 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
conflicts = transaction_exists(pkglist)
if len(conflicts) > 0:
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
+ res['results'].append("The following packages have pending transactions: %s" % ", ".join(conflicts))
+ res['rc'] = 128 # Ensure the task fails in with-loop
module.fail_json(**res)
# check_mode output
@@ -909,14 +932,16 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
if len(pkgs['install']) > 0: # install missing
cmd = yum_basecmd + ['install'] + pkgs['install']
rc, out, err = module.run_command(cmd)
- res['changed'] = True
+ if not out.strip().lower().endswith("no packages marked for update"):
+ res['changed'] = True
else:
rc, out, err = [0, '', '']
if len(will_update) > 0: # update present
cmd = yum_basecmd + ['update'] + pkgs['update']
rc2, out2, err2 = module.run_command(cmd)
- res['changed'] = True
+ if not out2.strip().lower().endswith("no packages marked for update"):
+ res['changed'] = True
else:
rc2, out2, err2 = [0, '', '']
@@ -937,7 +962,14 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo,
disable_gpg_check, exclude, repoq):
- yumbin = module.get_bin_path('yum')
+ # fedora will redirect yum to dnf, which has incompatibilities
+ # with how this module expects yum to operate. If yum-deprecated
+ # is available, use that instead to emulate the old behaviors.
+ if module.get_bin_path('yum-deprecated'):
+ yumbin = module.get_bin_path('yum-deprecated')
+ else:
+ yumbin = module.get_bin_path('yum')
+
# need debug level 2 to get 'Nothing to do' for groupinstall.
yum_basecmd = [yumbin, '-d', '2', '-y']
@@ -999,7 +1031,7 @@ def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo,
else:
# should be caught by AnsibleModule argument_spec
module.fail_json(msg="we should never get here unless this all"
- " failed", changed=False, results='', errors='unepected state')
+ " failed", changed=False, results='', errors='unexpected state')
return res
diff --git a/shippable.yml b/shippable.yml
deleted file mode 100644
index c182c149488..00000000000
--- a/shippable.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-language: python
-
-env:
- matrix:
- - TEST=none
-
-matrix:
- exclude:
- - env: TEST=none
- include:
- - env: TEST=integration IMAGE=ansible/ansible:centos6
- - env: TEST=integration IMAGE=ansible/ansible:centos7
- - env: TEST=integration IMAGE=ansible/ansible:fedora-rawhide
- - env: TEST=integration IMAGE=ansible/ansible:fedora23
- - env: TEST=integration IMAGE=ansible/ansible:opensuseleap
- - env: TEST=integration IMAGE=ansible/ansible:ubuntu1204 PRIVILEGED=true
- - env: TEST=integration IMAGE=ansible/ansible:ubuntu1404 PRIVILEGED=true
- - env: TEST=integration IMAGE=ansible/ansible:ubuntu1604
-
- - env: TEST=integration PLATFORM=windows VERSION=2008-SP2
- - env: TEST=integration PLATFORM=windows VERSION=2008-R2_SP1
- - env: TEST=integration PLATFORM=windows VERSION=2012-RTM
- - env: TEST=integration PLATFORM=windows VERSION=2012-R2_RTM
-
- - env: TEST=integration PLATFORM=freebsd VERSION=10.3-STABLE
-
- - env: TEST=integration PLATFORM=osx VERSION=10.11
-build:
- pre_ci_boot:
- options: "--privileged=false --net=bridge"
- ci:
- - test/utils/shippable/ci.sh
-
-integrations:
- notifications:
- - integrationName: email
- type: email
- on_success: never
- on_failure: never
- on_start: never
- on_pull_request: never
- - integrationName: irc
- type: irc
- recipients:
- - "chat.freenode.net#ansible-notices"
- on_success: change
- on_failure: always
- on_start: never
- on_pull_request: always
- - integrationName: slack
- type: slack
- recipients:
- - "#shippable"
- on_success: change
- on_failure: always
- on_start: never
- on_pull_request: never
diff --git a/source_control/git.py b/source_control/git.py
index 1641eac6498..6c79e8a3668 100644
--- a/source_control/git.py
+++ b/source_control/git.py
@@ -18,10 +18,14 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: git
-author:
+author:
- "Ansible Core Team"
- "Michael DeHaan"
version_added: "0.0.1"
@@ -46,8 +50,8 @@
default: "HEAD"
description:
- What version of the repository to check out. This can be the
- the literal string C(HEAD), a branch name, a tag name.
- It can also be a I(SHA-1) hash, in which case C(refspec) needs
+ the literal string C(HEAD), a branch name, a tag name.
+ It can also be a I(SHA-1) hash, in which case C(refspec) needs
to be specified if the given revision is not already available.
accept_hostkey:
required: false
@@ -55,8 +59,8 @@
choices: [ "yes", "no" ]
version_added: "1.5"
description:
- - if C(yes), adds the hostkey for the repo url if not already
- added. If ssh_opts contains "-o StrictHostKeyChecking=no",
+ - if C(yes), adds the hostkey for the repo url if not already
+ added. If ssh_opts contains "-o StrictHostKeyChecking=no",
this parameter is ignored.
ssh_opts:
required: false
@@ -186,39 +190,58 @@
notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
- SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
- one solution is to use the option accept_hostkey. Another solution is to
- add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
+ SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
+ one solution is to use the option accept_hostkey. Another solution is to
+ add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
'''
EXAMPLES = '''
# Example git checkout from Ansible Playbooks
-- git: repo=git://foosball.example.org/path/to/repo.git
- dest=/srv/checkout
- version=release-0.22
+- git:
+ repo: git://foosball.example.org/path/to/repo.git
+ dest: /srv/checkout
+ version: release-0.22
# Example read-write git checkout from github
-- git: repo=ssh://git@github.com/mylogin/hello.git dest=/home/mylogin/hello
+- git:
+ repo: ssh://git@github.com/mylogin/hello.git
+ dest: /home/mylogin/hello
# Example just ensuring the repo checkout exists
-- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout update=no
+- git:
+ repo: git://foosball.example.org/path/to/repo.git
+ dest: /srv/checkout
+ update: no
# Example just get information about the repository whether or not it has
# already been cloned locally.
-- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout clone=no update=no
+- git:
+ repo: git://foosball.example.org/path/to/repo.git
+ dest: /srv/checkout
+ clone: no
+ update: no
# Example checkout a github repo and use refspec to fetch all pull requests
-- git: repo=https://github.com/ansible/ansible-examples.git dest=/src/ansible-examples refspec=+refs/pull/*:refs/heads/*
+- git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: /src/ansible-examples
+ refspec: '+refs/pull/*:refs/heads/*'
'''
import os
import re
+import shlex
+import stat
+import sys
import tempfile
from distutils.version import LooseVersion
-from ansible.module_utils.six import string_types
-from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.basic import AnsibleModule, get_module_path
+from ansible.module_utils.known_hosts import add_git_host_key
+from ansible.module_utils.six import b, string_types
+from ansible.module_utils._text import to_native
+
def head_splitter(headfile, remote, module=None, fail_on_error=False):
'''Extract the head reference'''
@@ -253,13 +276,13 @@ def unfrackgitpath(path):
def get_submodule_update_params(module, git_path, cwd):
- #or: git submodule [--quiet] update [--init] [-N|--no-fetch]
- #[-f|--force] [--rebase] [--reference ] [--merge]
+ #or: git submodule [--quiet] update [--init] [-N|--no-fetch]
+ #[-f|--force] [--rebase] [--reference ] [--merge]
#[--recursive] [--] [...]
params = []
- # run a bad submodule command to get valid params
+ # run a bad submodule command to get valid params
cmd = "%s submodule update --help" % (git_path)
rc, stdout, stderr = module.run_command(cmd, cwd=cwd)
lines = stderr.split('\n')
@@ -272,7 +295,7 @@ def get_submodule_update_params(module, git_path, cwd):
update_line = update_line.replace(']','')
update_line = update_line.replace('|',' ')
parts = shlex.split(update_line)
- for part in parts:
+ for part in parts:
if part.startswith('--'):
part = part.replace('--', '')
params.append(part)
@@ -301,7 +324,7 @@ def write_ssh_wrapper():
if [ -z "$GIT_KEY" ]; then
ssh $BASEOPTS "$@"
else
- ssh -i "$GIT_KEY" $BASEOPTS "$@"
+ ssh -i "$GIT_KEY" -o IdentitiesOnly=yes $BASEOPTS "$@"
fi
"""
fh.write(template)
@@ -320,7 +343,7 @@ def set_git_ssh(ssh_wrapper, key_file, ssh_opts):
del os.environ["GIT_KEY"]
if key_file:
- os.environ["GIT_KEY"] = key_file
+ os.environ["GIT_KEY"] = key_file
if os.environ.get("GIT_SSH_OPTS"):
del os.environ["GIT_SSH_OPTS"]
@@ -369,18 +392,17 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare,
pass
cmd = [ git_path, 'clone' ]
- branch_or_tag = is_remote_branch(git_path, module, dest, repo, version) \
- or is_remote_tag(git_path, module, dest, repo, version)
-
if bare:
cmd.append('--bare')
else:
cmd.extend([ '--origin', remote ])
- if branch_or_tag:
- cmd.extend([ '--branch', version ])
- if depth and (branch_or_tag or version == 'HEAD' or refspec):
- # only use depth if the remote opject is branch or tag (i.e. fetchable)
- cmd.extend([ '--depth', str(depth) ])
+ if depth:
+ if version == 'HEAD' \
+ or refspec \
+ or is_remote_branch(git_path, module, dest, repo, version) \
+ or is_remote_tag(git_path, module, dest, repo, version):
+ # only use depth if the remote object is branch or tag (i.e. fetchable)
+ cmd.extend([ '--depth', str(depth) ])
if reference:
cmd.extend([ '--reference', str(reference) ])
cmd.extend([ repo, dest ])
@@ -421,11 +443,12 @@ def reset(git_path, module, dest):
def get_diff(module, git_path, dest, repo, remote, depth, bare, before, after):
''' Return the difference between 2 versions '''
- if before == None:
+ if before is None:
return { 'prepared': '>> Newly checked out %s' % after }
elif before != after:
# Ensure we have the object we are referring to during git diff !
- fetch(git_path, module, repo, dest, after, remote, depth, bare, '')
+ git_version_used = git_version(git_path, module)
+ fetch(git_path, module, repo, dest, after, remote, depth, bare, '', git_version_used)
cmd = '%s diff %s %s' % (git_path, before, after)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc == 0 and out:
@@ -469,7 +492,7 @@ def get_remote_head(git_path, module, dest, version, remote, bare):
out = to_native(out)
if tag:
- # Find the dereferenced tag if this is an annotated tag.
+ # Find the dereferenced tag if this is an annotated tag.
for tag in out.split('\n'):
if tag.endswith(version + '^{}'):
out = tag
@@ -483,7 +506,7 @@ def get_remote_head(git_path, module, dest, version, remote, bare):
def is_remote_tag(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version)
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
- if to_bytes(version) in out:
+ if to_native(version, errors='surrogate_or_strict') in out:
return True
else:
return False
@@ -513,7 +536,7 @@ def get_tags(git_path, module, dest):
def is_remote_branch(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
- if to_bytes(version) in out:
+ if to_native(version, errors='surrogate_or_strict') in out:
return True
else:
return False
@@ -530,8 +553,8 @@ def is_local_branch(git_path, module, dest, branch):
def is_not_a_branch(git_path, module, dest):
branches = get_branches(git_path, module, dest)
- for b in branches:
- if b.startswith('* ') and ('no branch' in b or 'detached from' in b):
+ for branch in branches:
+ if branch.startswith('* ') and ('no branch' in branch or 'detached from' in branch):
return True
return False
@@ -550,13 +573,23 @@ def get_head_branch(git_path, module, dest, remote, bare=False):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
- gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
- # There is a posibility the .git file to have an absolute path.
+ git_conf = open(repo_path, 'rb')
+ for line in git_conf:
+ config_val = line.split(b(':'), 1)
+ if config_val[0].strip() == b('gitdir'):
+ gitdir = to_native(config_val[1].strip(), errors='surrogate_or_strict')
+ break
+ else:
+ # No repo path found
+ return ''
+
+ # There is a possibility that the .git file has an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
except (IOError, AttributeError):
+ # No repo path found
return ''
# Read .git/HEAD for the name of the branch.
# If we're in a detached HEAD state, look up the branch associated with
@@ -590,11 +623,11 @@ def set_remote_url(git_path, module, repo, dest, remote):
label = "set a new url %s for %s" % (repo, remote)
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
- # Return False if remote_url is None to maintain previous bevhavior
+ # Return False if remote_url is None to maintain previous behavior
# for Git versions prior to 1.7.5 that lack required functionality.
return remote_url is not None
-def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec):
+def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used):
''' updates repo from remote sources '''
set_remote_url(git_path, module, repo, dest, remote)
commands = []
@@ -602,7 +635,6 @@ def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec):
fetch_str = 'download remote objects and refs'
fetch_cmd = [git_path, 'fetch']
-
refspecs = []
if depth:
# try to find the minimal set of refs we need to fetch to get a
@@ -614,7 +646,7 @@ def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec):
refspecs.append(currenthead)
elif is_remote_branch(git_path, module, dest, repo, version):
if currenthead != version:
- # this workaroung is only needed for older git versions
+ # this workaround is only needed for older git versions
# 1.8.3 is broken, 1.9.x works
# ensure that remote branch is available as both local and remote ref
refspecs.append('+refs/heads/%s:refs/heads/%s' % (version, version))
@@ -630,20 +662,22 @@ def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec):
# version
fetch_cmd.extend(['--depth', str(depth)])
- fetch_cmd.extend([remote])
if not depth or not refspecs:
# don't try to be minimalistic but do a full clone
# also do this if depth is given, but version is something that can't be fetched directly
if bare:
refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
else:
- # unlike in bare mode, there's no way to combine the
- # additional refspec with the default git fetch behavior,
- # so use two commands
- commands.append((fetch_str, fetch_cmd))
- refspecs = ['+refs/tags/*:refs/tags/*']
+ # ensure all tags are fetched
+ if git_version_used >= LooseVersion('1.9'):
+ fetch_cmd.append('--tags')
+ else:
+ # old git versions have a bug in --tags that prevents updating existing tags
+ commands.append((fetch_str, fetch_cmd + [remote]))
+ refspecs = ['+refs/tags/*:refs/tags/*']
if refspec:
refspecs.append(refspec)
+ fetch_cmd.extend([remote])
commands.append((fetch_str, fetch_cmd + refspecs))
@@ -672,7 +706,7 @@ def submodules_fetch(git_path, module, remote, track_submodules, dest):
if line.strip().startswith('url'):
repo = line.split('=', 1)[1].strip()
if module.params['ssh_opts'] is not None:
- if not "-o StrictHostKeyChecking=no" in module.params['ssh_opts']:
+ if "-o StrictHostKeyChecking=no" not in module.params['ssh_opts']:
add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
else:
add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
@@ -744,13 +778,22 @@ def set_remote_branch(git_path, module, dest, remote, version, depth):
def switch_version(git_path, module, dest, remote, version, verify_commit, depth):
cmd = ''
- if version != 'HEAD':
+ if version == 'HEAD':
+ branch = get_head_branch(git_path, module, dest, remote)
+ (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to checkout branch %s" % branch,
+ stdout=out, stderr=err, rc=rc)
+ cmd = "%s reset --hard %s" % (git_path, remote)
+ else:
+ # FIXME check for local_branch first, should have been fetched already
if is_remote_branch(git_path, module, dest, remote, version):
+ if depth and not is_local_branch(git_path, module, dest, version):
+ # git clone --depth implies --single-branch, which makes
+ # the checkout fail if the version changes
+ # fetch the remote branch, to be able to check it out next
+ set_remote_branch(git_path, module, dest, remote, version, depth)
if not is_local_branch(git_path, module, dest, version):
- if depth:
- # git clone --depth implies --single-branch, which makes
- # the checkout fail if the version changes
- set_remote_branch(git_path, module, dest, remote, version, depth)
cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
else:
(rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
@@ -760,13 +803,6 @@ def switch_version(git_path, module, dest, remote, version, verify_commit, depth
cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
else:
cmd = "%s checkout --force %s" % (git_path, version)
- else:
- branch = get_head_branch(git_path, module, dest, remote)
- (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
- if rc != 0:
- module.fail_json(msg="Failed to checkout branch %s" % branch,
- stdout=out, stderr=err, rc=rc)
- cmd = "%s reset --hard %s" % (git_path, remote)
(rc, out1, err1) = module.run_command(cmd, cwd=dest)
if rc != 0:
if version != 'HEAD':
@@ -783,7 +819,11 @@ def switch_version(git_path, module, dest, remote, version, verify_commit, depth
def verify_commit_sign(git_path, module, dest, version):
- cmd = "%s verify-commit %s" % (git_path, version)
+ if version in get_tags(git_path, module, dest):
+ git_sub = "verify-tag"
+ else:
+ git_sub = "verify-commit"
+ cmd = "%s %s %s" % (git_path, git_sub, version)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc)
@@ -851,7 +891,7 @@ def main():
result = dict( warnings=list() )
# evaluate and set the umask before doing anything else
- if umask != None:
+ if umask is not None:
if not isinstance(umask, string_types):
module.fail_json(msg="umask must be defined as a quoted octal integer")
try:
@@ -889,9 +929,9 @@ def main():
set_git_ssh(ssh_wrapper, key_file, ssh_opts)
module.add_cleanup_file(path=ssh_wrapper)
- # add the git repo's hostkey
+ # add the git repo's hostkey
if module.params['ssh_opts'] is not None:
- if not "-o StrictHostKeyChecking=no" in module.params['ssh_opts']:
+ if "-o StrictHostKeyChecking=no" not in module.params['ssh_opts']:
add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
else:
add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
@@ -906,7 +946,7 @@ def main():
result.update(before=None)
local_mods = False
- repo_updated = None
+ need_fetch = True
if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
# if there is no git configuration, do a clone operation unless:
# * the user requested no clone (they just want info)
@@ -922,7 +962,7 @@ def main():
module.exit_json(**result)
# there's no git config, so clone
clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit)
- repo_updated = True
+ need_fetch = False
elif not update:
# Just return having found a repo already in the dest path
# this does no checking that the repo is the actual repo
@@ -948,41 +988,28 @@ def main():
remote_url_changed = remote_url and remote_url != repo and remote_url != unfrackgitpath(repo)
else:
remote_url_changed = set_remote_url(git_path, module, repo, dest, remote)
- if remote_url_changed:
- result.update(remote_url_changed=True)
+ result.update(remote_url_changed=remote_url_changed)
- remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
- if result['before'] == remote_head:
- if local_mods:
- result.update(changed=True, after=remote_head, msg='Local modifications exist')
+ if need_fetch:
+ if module.check_mode:
+ remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
+ result.update(changed=(result['before'] != remote_head), after=remote_head)
+ # FIXME: This diff should fail since the new remote_head is not fetched yet?!
if module._diff:
diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
if diff:
result['diff'] = diff
module.exit_json(**result)
- elif version == 'HEAD':
- # If the remote and local match and we're using the default of
- # HEAD (It's not a real tag) then exit early
- repo_updated = False
- elif is_remote_tag(git_path, module, dest, repo, version):
- # if the remote is a tag and we have the tag locally, exit early
- if version in get_tags(git_path, module, dest):
- repo_updated = False
else:
- # if the remote is a branch and we have the branch locally, exit early
- if version in get_branches(git_path, module, dest):
- repo_updated = False
+ fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used)
- if repo_updated is None:
- if module.check_mode:
- result.update(changed=(result['before']!=remote_head), after=remote_head)
- if module._diff:
- diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
- if diff:
- result['diff'] = diff
+ result['after'] = get_version(module, git_path, dest)
+
+ if result['before'] == result['after']:
+ if local_mods:
+ result.update(changed=True, after=remote_head, msg='Local modifications exist')
+ # no diff, since the repo didn't change
module.exit_json(**result)
- fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec)
- repo_updated = True
# switch to version specified regardless of whether
# we got new revisions from the repository
@@ -996,12 +1023,10 @@ def main():
if submodules_updated:
result.update(submodules_changed=submodules_updated)
- if module.check_mode:
- if submodules_updated:
+ if module.check_mode:
result.update(changed=True, after=remote_head)
module.exit_json(**result)
- if submodules_updated:
# Switch to version specified
submodule_update(git_path, module, dest, track_submodules, force=force)
@@ -1026,9 +1051,6 @@ def main():
module.exit_json(**result)
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.known_hosts import *
if __name__ == '__main__':
main()
diff --git a/source_control/hg.py b/source_control/hg.py
index 5f279e1a9ed..89845c197c9 100644
--- a/source_control/hg.py
+++ b/source_control/hg.py
@@ -23,6 +23,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: hg
@@ -41,6 +45,7 @@
dest:
description:
- Absolute path of where the repository should be cloned to.
This parameter is required, unless C(clone) and C(update) are set to C(no).
required: true
default: null
revision:
@@ -70,6 +75,13 @@
version_added: "2.0"
description:
- If C(no), do not retrieve new revisions from the origin repository
+ clone:
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ version_added: "2.3"
+ description:
+ - If C(no), do not clone the repository if it does not exist locally.
executable:
required: false
default: null
@@ -87,7 +99,19 @@
EXAMPLES = '''
# Ensure the current working copy is inside the stable branch and deletes untracked files if any.
-- hg: repo=https://bitbucket.org/user/repo1 dest=/home/user/repo1 revision=stable purge=yes
+- hg:
+ repo: https://bitbucket.org/user/repo1
+ dest: /home/user/repo1
+ revision: stable
+ purge: yes
+
+# Example just get information about the repository whether or not it has
+# already been cloned locally.
+- hg:
+ repo: https://bitbucket.org/user/repo
+ dest: /srv/checkout
+ clone: no
+ update: no
'''
import os
@@ -129,6 +153,13 @@ def get_revision(self):
else:
return to_native(out).strip('\n')
+ def get_remote_revision(self):
+ (rc, out, err) = self._command(['id', self.repo])
+ if rc != 0:
+ self.module_fail_json(msg=err)
+ else:
+ return to_native(out).strip('\n')
+
def has_local_mods(self):
now = self.get_revision()
if '+' in now:
@@ -215,11 +246,12 @@ def main():
module = AnsibleModule(
argument_spec = dict(
repo = dict(required=True, aliases=['name']),
- dest = dict(required=True, type='path'),
+ dest = dict(type='path'),
revision = dict(default=None, aliases=['version']),
force = dict(default='no', type='bool'),
purge = dict(default='no', type='bool'),
update = dict(default='yes', type='bool'),
+ clone = dict(default='yes', type='bool'),
executable = dict(default=None),
),
)
@@ -229,22 +261,33 @@ def main():
force = module.params['force']
purge = module.params['purge']
update = module.params['update']
+ clone = module.params['clone']
hg_path = module.params['executable'] or module.get_bin_path('hg', True)
- hgrc = os.path.join(dest, '.hg/hgrc')
+ if dest is not None:
+ hgrc = os.path.join(dest, '.hg/hgrc')
# initial states
before = ''
changed = False
cleaned = False
+ if not dest and (clone or update):
+ module.fail_json(msg="the destination directory must be specified unless clone=no and update=no")
+
hg = Hg(module, dest, repo, revision, hg_path)
# If there is no hgrc file, then assume repo is absent
# and perform clone. Otherwise, perform pull and update.
+ if not clone and not update:
+ out = hg.get_remote_revision()
+ module.exit_json(after=out, changed=False)
if not os.path.exists(hgrc):
- (rc, out, err) = hg.clone()
- if rc != 0:
- module.fail_json(msg=err)
+ if clone:
+ (rc, out, err) = hg.clone()
+ if rc != 0:
+ module.fail_json(msg=err)
+ else:
+ module.exit_json(changed=False)
elif not update:
# Just return having found a repo already in the dest path
before = hg.get_revision()
diff --git a/source_control/subversion.py b/source_control/subversion.py
index 1f52a2925ed..09477700b9f 100644
--- a/source_control/subversion.py
+++ b/source_control/subversion.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: subversion
@@ -72,6 +76,20 @@
description:
- Path to svn executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
+ checkout:
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ version_added: "2.3"
+ description:
+ - If no, do not check out the repository if it does not exist locally
+ update:
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ version_added: "2.3"
+ description:
+ - If no, do not retrieve new revisions from the origin repository
export:
required: false
default: "no"
@@ -90,10 +108,22 @@
EXAMPLES = '''
# Checkout subversion repository to specified folder.
-- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/checkout
+- subversion:
+ repo: svn+ssh://an.example.org/path/to/repo
+ dest: /src/checkout
# Export subversion directory to folder
-- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/export export=True
+- subversion:
+ repo: svn+ssh://an.example.org/path/to/repo
+ dest: /src/export
+
+# Example just get information about the repository whether or not it has
+# already been cloned locally.
+- subversion:
+ repo: svn+ssh://an.example.org/path/to/repo
+ dest: /srv/checkout
+ checkout: no
+ update: no
'''
import re
@@ -168,14 +198,20 @@ def get_revision(self):
url = re.search(r'^URL:.*$', text, re.MULTILINE).group(0)
return rev, url
+ def get_remote_revision(self):
+ '''Revision and URL of subversion working directory.'''
+ text = '\n'.join(self._exec(["info", self.repo]))
+ rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)
+ return rev
+
def has_local_mods(self):
'''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''
lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest])
# The --quiet option will return only modified files.
# Match only revisioned files, i.e. ignore status '?'.
regex = re.compile(r'^[^?X]')
- # Has local mods if more than 0 modifed revisioned files.
- return len(filter(regex.match, lines)) > 0
+ # Has local mods if more than 0 modified revisioned files.
+ return len(list(filter(regex.match, lines))) > 0
def needs_update(self):
curr, url = self.get_revision()
@@ -194,14 +230,16 @@ def needs_update(self):
def main():
module = AnsibleModule(
argument_spec=dict(
- dest=dict(required=True, type='path'),
+ dest=dict(type='path'),
repo=dict(required=True, aliases=['name', 'repository']),
revision=dict(default='HEAD', aliases=['rev', 'version']),
force=dict(default='no', type='bool'),
username=dict(required=False),
- password=dict(required=False),
+ password=dict(required=False, no_log=True),
executable=dict(default=None, type='path'),
export=dict(default=False, required=False, type='bool'),
+ checkout=dict(default=True, required=False, type='bool'),
+ update=dict(default=True, required=False, type='bool'),
switch=dict(default=True, required=False, type='bool'),
),
supports_check_mode=True
@@ -216,19 +254,28 @@ def main():
svn_path = module.params['executable'] or module.get_bin_path('svn', True)
export = module.params['export']
switch = module.params['switch']
+ checkout = module.params['checkout']
+ update = module.params['update']
# We screenscrape a huge amount of svn commands so use C locale anytime we
# call run_command()
module.run_command_environ_update = dict(LANG='C', LC_MESSAGES='C')
+ if not dest and (checkout or update or export):
+ module.fail_json(msg="the destination directory must be specified unless checkout=no, update=no, and export=no")
+
svn = Subversion(module, dest, repo, revision, username, password, svn_path)
+ if not export and not update and not checkout:
+ module.exit_json(changed=False, after=svn.get_remote_revision())
if export or not os.path.exists(dest):
before = None
local_mods = False
if module.check_mode:
module.exit_json(changed=True)
- if not export:
+ elif not export and not checkout:
+ module.exit_json(changed=False)
+ if not export and checkout:
svn.checkout()
else:
svn.export(force=force)
@@ -236,7 +283,7 @@ def main():
# Order matters. Need to get local mods before switch to avoid false
# positives. Need to switch before revert to ensure we are reverting to
# correct repo.
- if module.check_mode:
+ if module.check_mode or not update:
check, before, after = svn.needs_update()
module.exit_json(changed=check, before=before, after=after)
before = svn.get_revision()
@@ -261,4 +308,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/system/authorized_key.py b/system/authorized_key.py
index 4b0420708d1..c2023e30124 100644
--- a/system/authorized_key.py
+++ b/system/authorized_key.py
@@ -21,6 +21,10 @@
along with Ansible. If not, see .
"""
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: authorized_key
@@ -92,21 +96,27 @@
EXAMPLES = '''
# Example using key data from a local file on the management machine
-- authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
+- authorized_key:
+ user: charlie
+ key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
# Using github url as key source
-- authorized_key: user=charlie key=https://github.com/charlie.keys
+- authorized_key:
+ user: charlie
+ key: https://github.com/charlie.keys
# Using alternate directory locations:
- authorized_key:
user: charlie
key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
- path: '/etc/ssh/authorized_keys/charlie'
+ path: /etc/ssh/authorized_keys/charlie
manage_dir: no
# Using with_file
- name: Set up authorized_keys for the deploy user
- authorized_key: user=deploy key="{{ item }}"
+ authorized_key:
+ user: deploy
+ key: "{{ item }}"
with_file:
- public_keys/doe-jane
- public_keys/doe-john
@@ -114,19 +124,28 @@
# Using key_options:
- authorized_key:
user: charlie
- key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
+ key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
key_options: 'no-port-forwarding,from="10.0.1.1"'
# Using validate_certs:
-- authorized_key: user=charlie key=https://github.com/user.keys validate_certs=no
+- authorized_key:
+ user: charlie
+ key: https://github.com/user.keys
+ validate_certs: no
# Set up authorized_keys exclusively with one key
-- authorized_key: user=root key="{{ item }}" state=present exclusive=yes
+- authorized_key:
+ user: root
+ key: "{{ item }}"
+ state: present
+ exclusive: yes
with_file:
- public_keys/doe-jane
# Copies the key from the user who is running ansible to the remote machine user ubuntu
-- authorized_key: user=ubuntu key="{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
+- authorized_key:
+ user: ubuntu
+ key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
become: yes
'''
@@ -143,34 +162,82 @@
#
# see example in examples/playbooks
-import sys
import os
import pwd
import os.path
import tempfile
import re
import shlex
+from operator import itemgetter
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import fetch_url
class keydict(dict):
- """ a dictionary that maintains the order of keys as they are added """
+ """ a dictionary that maintains the order of keys as they are added
+
+ This has become an abuse of the dict interface. Probably should be
+ rewritten to be an entirely custom object with methods instead of
+ bracket-notation.
+
+ Our requirements are for a data structure that:
+ * Preserves insertion order
+ * Can store multiple values for a single key.
+
+ The present implementation has the following functions used by the rest of
+ the code:
+
+ * __setitem__(): to add a key=value. The value can never be disassociated
+ with the key, only new values can be added in addition.
+ * items(): to retrieve the key, value pairs.
+
+ Other dict methods should work but may be surprising. For instance, there
+ will be multiple keys that are the same in keys() and __getitem__() will
+ return a list of the values that have been set via __setitem__.
+ """
# http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class
def __init__(self, *args, **kw):
super(keydict,self).__init__(*args, **kw)
- self.itemlist = super(keydict,self).keys()
+ self.itemlist = list(super(keydict,self).keys())
+
def __setitem__(self, key, value):
self.itemlist.append(key)
- super(keydict,self).__setitem__(key, value)
+ if key in self:
+ self[key].append(value)
+ else:
+ super(keydict, self).__setitem__(key, [value])
+
def __iter__(self):
return iter(self.itemlist)
+
def keys(self):
- return list(set(self.itemlist))
- def values(self):
- return [self[key] for key in self]
+ return self.itemlist
+
+ def _item_generator(self):
+ indexes = {}
+ for key in self.itemlist:
+ if key in indexes:
+ indexes[key] += 1
+ else:
+ indexes[key] = 0
+ yield key, self[key][indexes[key]]
+
+ def iteritems(self):
+ return self._item_generator()
+
+ def items(self):
+ return list(self.iteritems())
+
def itervalues(self):
- return (self[key] for key in self)
+ return (item[1] for item in self.iteritems())
+
+ def values(self):
+ return list(self.itervalues())
+
def keyfile(module, user, write=False, path=None, manage_dir=True):
"""
@@ -243,32 +310,26 @@ def parseoptions(module, options):
'''
options_dict = keydict() #ordered dict
if options:
- try:
- # the following regex will split on commas while
- # ignoring those commas that fall within quotes
- regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
- parts = regex.split(options)[1:-1]
- for part in parts:
- if "=" in part:
- (key, value) = part.split("=", 1)
- if options_dict.has_key(key):
- if isinstance(options_dict[key], list):
- options_dict[key].append(value)
- else:
- options_dict[key] = [options_dict[key], value]
- else:
- options_dict[key] = value
- elif part != ",":
- options_dict[part] = None
- except:
- module.fail_json(msg="invalid option string: %s" % options)
+ # the following regex will split on commas while
+ # ignoring those commas that fall within quotes
+ regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
+ parts = regex.split(options)[1:-1]
+ for part in parts:
+ if "=" in part:
+ (key, value) = part.split("=", 1)
+ options_dict[key] = value
+ elif part != ",":
+ options_dict[part] = None
return options_dict
-def parsekey(module, raw_key):
+def parsekey(module, raw_key, rank=None):
'''
parses a key, which may or may not contain a list
of ssh-key options at the beginning
+
+ rank indicates the keys original ordering, so that
+ it can be written out in the same order.
'''
VALID_SSH2_KEY_TYPES = [
@@ -295,6 +356,10 @@ def parsekey(module, raw_key):
lex.whitespace_split = True
key_parts = list(lex)
+ if key_parts and key_parts[0] == '#':
+ # comment line, invalid line, etc.
+ return (raw_key, 'skipped', None, None, rank)
+
for i in range(0, len(key_parts)):
if key_parts[i] in VALID_SSH2_KEY_TYPES:
type_index = i
@@ -317,7 +382,7 @@ def parsekey(module, raw_key):
if len(key_parts) > (type_index + 1):
comment = " ".join(key_parts[(type_index + 2):])
- return (key, key_type, options, comment)
+ return (key, key_type, options, comment, rank)
def readkeys(module, filename):
@@ -326,15 +391,15 @@ def readkeys(module, filename):
keys = {}
f = open(filename)
- for line in f.readlines():
- key_data = parsekey(module, line)
+ for rank_index, line in enumerate(f.readlines()):
+ key_data = parsekey(module, line, rank=rank_index)
if key_data:
# use key as identifier
keys[key_data[0]] = key_data
else:
- # for an invalid line, just append the line
- # to the array so it will be re-output later
- keys[line] = line
+ # for an invalid line, just set the line
+ # dict key to the line so it will be re-output later
+ keys[line] = (line, 'skipped', None, None, rank_index)
f.close()
return keys
@@ -342,25 +407,36 @@ def writekeys(module, filename, keys):
fd, tmp_path = tempfile.mkstemp('', 'tmp', os.path.dirname(filename))
f = open(tmp_path,"w")
+
+ # FIXME: only the f.writelines() needs to be in try clause
try:
- for index, key in keys.items():
+ new_keys = keys.values()
+ # order the new_keys by their original ordering, via the rank item in the tuple
+ ordered_new_keys = sorted(new_keys, key=itemgetter(4))
+
+ for key in ordered_new_keys:
try:
- (keyhash,type,options,comment) = key
+ (keyhash, key_type, options, comment, rank) = key
+
option_str = ""
if options:
option_strings = []
- for option_key in options.keys():
- if options[option_key]:
- if isinstance(options[option_key], list):
- for value in options[option_key]:
- option_strings.append("%s=%s" % (option_key, value))
- else:
- option_strings.append("%s=%s" % (option_key, options[option_key]))
- else:
+ for option_key, value in options.items():
+ if value is None:
option_strings.append("%s" % option_key)
+ else:
+ option_strings.append("%s=%s" % (option_key, value))
option_str = ",".join(option_strings)
option_str += " "
- key_line = "%s%s %s %s\n" % (option_str, type, keyhash, comment)
+
+ # comment line or invalid line, just leave it
+ if not key_type:
+ key_line = key
+
+ if key_type == 'skipped':
+ key_line = key[0]
+ else:
+ key_line = "%s%s %s %s\n" % (option_str, key_type, keyhash, comment)
except:
key_line = key
f.writelines(key_line)
@@ -382,7 +458,6 @@ def enforce_state(module, params):
state = params.get("state", "present")
key_options = params.get("key_options", None)
exclusive = params.get("exclusive", False)
- validate_certs = params.get("validate_certs", True)
error_msg = "Error getting key from: %s"
# if the key is a url, request it and use it as key source
@@ -397,45 +472,47 @@ def enforce_state(module, params):
module.fail_json(msg=error_msg % key)
# extract individual keys into an array, skipping blank lines and comments
- key = [s for s in key.splitlines() if s and not s.startswith('#')]
+ new_keys = [s for s in key.splitlines() if s and not s.startswith('#')]
# check current state -- just get the filename, don't create file
do_write = False
params["keyfile"] = keyfile(module, user, do_write, path, manage_dir)
existing_keys = readkeys(module, params["keyfile"])
-
# Add a place holder for keys that should exist in the state=present and
# exclusive=true case
keys_to_exist = []
+ # we will order any non exclusive new keys higher than all the existing keys,
+ # resulting in the new keys being written to the key file after existing keys, but
+ # in the order of new_keys
+ max_rank_of_existing_keys = len(existing_keys)
+
# Check our new keys, if any of them exist we'll continue.
- for new_key in key:
- parsed_new_key = parsekey(module, new_key)
+ for rank_index, new_key in enumerate(new_keys):
+ parsed_new_key = parsekey(module, new_key, rank=rank_index)
if not parsed_new_key:
module.fail_json(msg="invalid key specified: %s" % new_key)
if key_options is not None:
parsed_options = parseoptions(module, key_options)
- parsed_new_key = (parsed_new_key[0], parsed_new_key[1], parsed_options, parsed_new_key[3])
+ # rank here is the rank in the provided new keys, which may be unrelated to rank in existing_keys
+ parsed_new_key = (parsed_new_key[0], parsed_new_key[1], parsed_options, parsed_new_key[3], parsed_new_key[4])
- present = False
matched = False
non_matching_keys = []
if parsed_new_key[0] in existing_keys:
- present = True
- # Then we check if everything matches, including
+ # Then we check if everything (except the rank at index 4) matches, including
# the key type and options. If not, we append this
# existing key to the non-matching list
# We only want it to match everything when the state
# is present
- if parsed_new_key != existing_keys[parsed_new_key[0]] and state == "present":
+ if parsed_new_key[:4] != existing_keys[parsed_new_key[0]][:4] and state == "present":
non_matching_keys.append(existing_keys[parsed_new_key[0]])
else:
matched = True
-
# handle idempotent state=present
if state=="present":
keys_to_exist.append(parsed_new_key[0])
@@ -445,8 +522,12 @@ def enforce_state(module, params):
del existing_keys[non_matching_key[0]]
do_write = True
+ # new key that didn't exist before. Where should it go in the ordering?
if not matched:
- existing_keys[parsed_new_key[0]] = parsed_new_key
+ # We want the new key to be after existing keys if not exclusive (rank > max_rank_of_existing_keys)
+ total_rank = max_rank_of_existing_keys + parsed_new_key[4]
+ # replace existing key tuple with new parsed key with its total rank
+ existing_keys[parsed_new_key[0]] = (parsed_new_key[0], parsed_new_key[1], parsed_new_key[2], parsed_new_key[3], total_rank)
do_write = True
elif state=="absent":
@@ -456,6 +537,7 @@ def enforce_state(module, params):
do_write = True
# remove all other keys to honor exclusive
+ # for 'exclusive', make sure keys are written in the order the new keys were
if state == "present" and exclusive:
to_remove = frozenset(existing_keys).difference(keys_to_exist)
for key in to_remove:
@@ -474,7 +556,6 @@ def enforce_state(module, params):
return params
def main():
-
module = AnsibleModule(
argument_spec = dict(
user = dict(required=True, type='str'),
@@ -493,7 +574,5 @@ def main():
results = enforce_state(module, module.params)
module.exit_json(**results)
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/cron.py b/system/cron.py
index 0c17777be4e..6e87147f39e 100644
--- a/system/cron.py
+++ b/system/cron.py
@@ -22,7 +22,7 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
#
-# Cron Plugin: The goal of this plugin is to provide an indempotent method for
+# Cron Plugin: The goal of this plugin is to provide an idempotent method for
# setting up cron jobs on a host. The script will play well with other manually
# entered crons. Each cron job entered will be preceded with a comment
# describing the job so that it can be found later, which is required to be
@@ -31,6 +31,10 @@
# This module is based on python-crontab by Martin Owens.
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = """
---
module: cron
@@ -139,7 +143,7 @@
env:
description:
- If set, manages a crontab's environment variable. New variables are added on top of crontab.
- "name" and "value" paramenters are the name and the value of environment variable.
+ "name" and "value" parameters are the name and the value of environment variable.
version_added: "2.1"
required: false
default: "no"
@@ -171,40 +175,74 @@
EXAMPLES = '''
# Ensure a job that runs at 2 and 5 exists.
# Creates an entry like "0 5,2 * * ls -alh > /dev/null"
-- cron: name="check dirs" minute="0" hour="5,2" job="ls -alh > /dev/null"
+- cron:
+ name: "check dirs"
+ minute: "0"
+ hour: "5,2"
+ job: "ls -alh > /dev/null"
# Ensure an old job is no longer present. Removes any job that is prefixed
# by "#Ansible: an old job" from the crontab
-- cron: name="an old job" state=absent
+- cron:
+ name: "an old job"
+ state: absent
# Creates an entry like "@reboot /some/job.sh"
-- cron: name="a job for reboot" special_time=reboot job="/some/job.sh"
+- cron:
+ name: "a job for reboot"
+ special_time: reboot
+ job: "/some/job.sh"
# Creates an entry like "PATH=/opt/bin" on top of crontab
-- cron: name=PATH env=yes value=/opt/bin
+- cron:
+ name: PATH
+ env: yes
+ value: /opt/bin
# Creates an entry like "APP_HOME=/srv/app" and insert it after PATH
# declaration
-- cron: name=APP_HOME env=yes value=/srv/app insertafter=PATH
+- cron:
+ name: APP_HOME
+ env: yes
+ value: /srv/app
+ insertafter: PATH
# Creates a cron file under /etc/cron.d
-- cron: name="yum autoupdate" weekday="2" minute=0 hour=12
- user="root" job="YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
- cron_file=ansible_yum-autoupdate
+- cron:
+ name: yum autoupdate
+ weekday: 2
+ minute: 0
+ hour: 12
+ user: root
+ job: "YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
+ cron_file: ansible_yum-autoupdate
# Removes a cron file from under /etc/cron.d
-- cron: name="yum autoupdate" cron_file=ansible_yum-autoupdate state=absent
+- cron:
+ name: "yum autoupdate"
+ cron_file: ansible_yum-autoupdate
+ state: absent
# Removes "APP_HOME" environment variable from crontab
-- cron: name=APP_HOME env=yes state=absent
+- cron:
+ name: APP_HOME
+ env: yes
+ state: absent
'''
import os
+import pwd
import re
import tempfile
import platform
import pipes
+try:
+ import selinux
+ HAS_SELINUX = True
+except ImportError:
+ HAS_SELINUX = False
+
CRONCMD = "/usr/bin/crontab"
class CronTabError(Exception):
@@ -223,6 +261,7 @@ def __init__(self, module, user=None, cron_file=None):
self.root = (os.getuid() == 0)
self.lines = None
self.ansible = "#Ansible: "
+ self.existing = ''
if cron_file:
if os.path.isabs(cron_file):
@@ -241,7 +280,8 @@ def read(self):
# read the cronfile
try:
f = open(self.cron_file, 'r')
- self.lines = f.read().splitlines()
+ self.existing = f.read()
+ self.lines = self.existing.splitlines()
f.close()
except IOError:
# cron file does not exist
@@ -255,6 +295,8 @@ def read(self):
if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
raise CronTabError("Unable to read crontab")
+ self.existing = out
+
lines = out.splitlines()
count = 0
for l in lines:
@@ -262,6 +304,9 @@ def read(self):
not re.match( r'# \(/tmp/.*installed on.*\)', l) and
not re.match( r'# \(.*version.*\)', l)):
self.lines.append(l)
+ else:
+ pattern = re.escape(l) + '[\r\n]?'
+ self.existing = re.sub(pattern, '', self.existing, 1)
count += 1
def is_empty(self):
@@ -299,9 +344,16 @@ def write(self, backup_file=None):
if rc != 0:
self.module.fail_json(msg=err)
+ # set SELinux permissions
+ if HAS_SELINUX:
+ selinux.selinux_lsetfilecon_default(self.cron_file)
+
+ def do_comment(self, name):
+ return "%s%s" % (self.ansible, name)
+
def add_job(self, name, job):
# Add the comment
- self.lines.append("%s%s" % (self.ansible, name))
+ self.lines.append(self.do_comment(name))
# Add the job
self.lines.append("%s" % (job))
@@ -362,7 +414,8 @@ def remove_job_file(self):
except:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
- def find_job(self, name):
+ def find_job(self, name, job=None):
+ # attempt to find job by 'Ansible:' header comment
comment = None
for l in self.lines:
if comment is not None:
@@ -373,6 +426,19 @@ def find_job(self, name):
elif re.match( r'%s' % self.ansible, l):
comment = re.sub( r'%s' % self.ansible, '', l)
+ # failing that, attempt to find job by exact match
+ if job:
+ for i, l in enumerate(self.lines):
+ if l == job:
+ # if no leading ansible header, insert one
+ if not re.match( r'%s' % self.ansible, self.lines[i-1]):
+ self.lines.insert(i, self.do_comment(name))
+ return [self.lines[i], l, True]
+ # if a leading blank ansible header AND job has a name, update header
+ elif name and self.lines[i-1] == self.do_comment(None):
+ self.lines[i-1] = self.do_comment(name)
+ return [self.lines[i-1], l, True]
+
return []
def find_env(self, name):
@@ -423,7 +489,7 @@ def get_envnames(self):
return envnames
def _update_job(self, name, job, addlinesfunction):
- ansiblename = "%s%s" % (self.ansible, name)
+ ansiblename = self.do_comment(name)
newlines = []
comment = None
@@ -463,8 +529,8 @@ def render(self):
crons.append(cron)
result = '\n'.join(crons)
- if result and result[-1] not in ['\n', '\r']:
- result += '\n'
+ if result:
+ result = result.rstrip('\r\n') + '\n'
return result
def _read_user_execute(self):
@@ -479,7 +545,7 @@ def _read_user_execute(self):
return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user))
elif platform.system() == 'HP-UX':
return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user))
- elif os.getlogin() != self.user:
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % pipes.quote(self.user)
return "%s %s %s" % (CRONCMD , user, '-l')
@@ -491,7 +557,7 @@ def _write_execute(self, path):
if self.user:
if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path))
- elif os.getlogin() != self.user:
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % pipes.quote(self.user)
return "%s %s %s" % (CRONCMD , user, pipes.quote(path))
@@ -580,7 +646,7 @@ def main():
if module._diff:
diff = dict()
- diff['before'] = crontab.render()
+ diff['before'] = crontab.existing
if crontab.cron_file:
diff['before_header'] = crontab.cron_file
else:
@@ -644,21 +710,31 @@ def main():
crontab.remove_env(name)
changed = True
else:
- job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
- old_job = crontab.find_job(name)
-
if do_install:
+ job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
+ old_job = crontab.find_job(name, job)
+
if len(old_job) == 0:
crontab.add_job(name, job)
changed = True
if len(old_job) > 0 and old_job[1] != job:
crontab.update_job(name, job)
changed = True
+ if len(old_job) > 2:
+ crontab.update_job(name, job)
+ changed = True
else:
+ old_job = crontab.find_job(name)
+
if len(old_job) > 0:
crontab.remove_job(name)
changed = True
+ # no changes to env/job, but existing crontab needs a terminating newline
+ if not changed:
+ if not (crontab.existing.endswith('\r') or crontab.existing.endswith('\n')):
+ changed = True
+
res_args = dict(
jobs = crontab.get_jobnames(),
envs = crontab.get_envnames(),
@@ -699,5 +775,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
-
+if __name__ == '__main__':
+ main()
diff --git a/system/group.py b/system/group.py
index 8edb93a1d0a..132d71ad2d3 100644
--- a/system/group.py
+++ b/system/group.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: group
@@ -53,7 +57,9 @@
EXAMPLES = '''
# Example group command from Ansible Playbooks
-- group: name=somegroup state=present
+- group:
+ name: somegroup
+ state: present
'''
import grp
@@ -144,7 +150,7 @@ class SunOS(Group):
This overrides the following methods from the generic class:-
- group_add()
- """
+ """
platform = 'SunOS'
distribution = None
@@ -258,7 +264,7 @@ class DarwinGroup(Group):
- group_add()
- group_mod()
- group manupulation are done using dseditgroup(1).
+ group manipulation are done using dseditgroup(1).
"""
platform = 'Darwin'
@@ -269,6 +275,11 @@ def group_add(self, **kwargs):
cmd += [ '-o', 'create' ]
if self.gid is not None:
cmd += [ '-i', self.gid ]
+ elif 'system' in kwargs and kwargs['system'] == True:
+ gid = self.get_lowest_available_system_gid()
+ if gid != False:
+ self.gid = str(gid)
+ cmd += [ '-i', self.gid ]
cmd += [ '-L', self.name ]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
@@ -291,6 +302,26 @@ def group_mod(self, gid=None):
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
return (None, '', '')
+
+ def get_lowest_available_system_gid(self):
+ # check for lowest available system gid (< 500)
+ try:
+ cmd = [self.module.get_bin_path('dscl', True)]
+ cmd += [ '/Local/Default', '-list', '/Groups', 'PrimaryGroupID']
+ (rc, out, err) = self.execute_command(cmd)
+ lines = out.splitlines()
+ highest = 0
+ for group_info in lines:
+ parts = group_info.split(' ')
+ if len(parts) > 1:
+ gid = int(parts[-1])
+ if gid > highest and gid < 500:
+ highest = gid
+ if highest == 0 or highest == 499:
+ return False
+ return (highest + 1)
+ except:
+ return False
class OpenBsdGroup(Group):
"""
@@ -439,4 +470,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/system/hostname.py b/system/hostname.py
index d16b1ccca28..c6432428fca 100644
--- a/system/hostname.py
+++ b/system/hostname.py
@@ -18,18 +18,22 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: hostname
author:
- - "Hiroaki Nakamura (@hnakamur)"
+ - "Adrian Likins (@alikins)"
- "Hideki Saito (@saito-hideki)"
version_added: "1.4"
short_description: Manage hostname
requirements: [ hostname ]
description:
- Set system's hostname.
- - Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI.
+ - Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI, Alpine Linux.
- Any distribution that uses systemd as their init system.
- Note, this module does *NOT* modify /etc/hosts. You need to modify it yourself using other modules like template or replace.
options:
@@ -40,7 +44,8 @@
'''
EXAMPLES = '''
-- hostname: name=web01
+- hostname:
+ name: web01
'''
import socket
@@ -56,6 +61,15 @@ class UnimplementedStrategy(object):
def __init__(self, module):
self.module = module
+ def update_current_and_permanent_hostname(self):
+ self.unimplemented_error()
+
+ def update_current_hostname(self):
+ self.unimplemented_error()
+
+ def update_permanent_hostname(self):
+ self.unimplemented_error()
+
def get_current_hostname(self):
self.unimplemented_error()
@@ -103,6 +117,9 @@ def __init__(self, module):
else:
self.strategy = self.strategy_class(module)
+ def update_current_and_permanent_hostname(self):
+ return self.strategy.update_current_and_permanent_hostname()
+
def get_current_hostname(self):
return self.strategy.get_current_hostname()
@@ -129,6 +146,26 @@ class GenericStrategy(object):
def __init__(self, module):
self.module = module
self.hostname_cmd = self.module.get_bin_path('hostname', True)
+ self.changed = False
+
+ def update_current_and_permanent_hostname(self):
+ self.update_current_hostname()
+ self.update_permanent_hostname()
+ return self.changed
+
+ def update_current_hostname(self):
+ name = self.module.params['name']
+ current_name = self.get_current_hostname()
+ if current_name != name:
+ self.set_current_hostname(name)
+ self.changed = True
+
+ def update_permanent_hostname(self):
+ name = self.module.params['name']
+ permanent_name = self.get_permanent_hostname()
+ if permanent_name != name:
+ self.set_permanent_hostname(name)
+ self.changed = True
def get_current_hostname(self):
cmd = [self.hostname_cmd]
@@ -283,6 +320,59 @@ def set_permanent_hostname(self, name):
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
+# ===========================================
+
+class AlpineStrategy(GenericStrategy):
+ """
+ This is a Alpine Linux Hostname manipulation strategy class - it edits
+ the /etc/hostname file then run hostname -F /etc/hostname.
+ """
+
+ HOSTNAME_FILE = '/etc/hostname'
+
+ def update_current_and_permanent_hostname(self):
+ self.update_permanent_hostname()
+ self.update_current_hostname()
+ return self.changed
+
+ def get_permanent_hostname(self):
+ if not os.path.isfile(self.HOSTNAME_FILE):
+ try:
+ open(self.HOSTNAME_FILE, "a").write("")
+ except IOError:
+ err = get_exception()
+ self.module.fail_json(msg="failed to write file: %s" %
+ str(err))
+ try:
+ f = open(self.HOSTNAME_FILE)
+ try:
+ return f.read().strip()
+ finally:
+ f.close()
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(msg="failed to read hostname: %s" %
+ str(err))
+
+ def set_permanent_hostname(self, name):
+ try:
+ f = open(self.HOSTNAME_FILE, 'w+')
+ try:
+ f.write("%s\n" % name)
+ finally:
+ f.close()
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(msg="failed to update hostname: %s" %
+ str(err))
+
+ def set_current_hostname(self, name):
+ cmd = [self.hostname_cmd, '-F', self.HOSTNAME_FILE]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
+ (rc, out, err))
+
# ===========================================
@@ -567,6 +657,11 @@ class ScientificLinuxHostname(Hostname):
distribution = 'Scientific linux'
strategy_class = RedHatStrategy
+class ScientificLinuxCERNHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Scientific linux cern slc'
+ strategy_class = RedHatStrategy
+
class OracleLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Oracle linux server'
@@ -612,6 +707,11 @@ class ALTLinuxHostname(Hostname):
distribution = 'Altlinux'
strategy_class = RedHatStrategy
+class AlpineLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Alpine'
+ strategy_class = AlpineStrategy
+
class OpenBSDHostname(Hostname):
platform = 'OpenBSD'
distribution = None
@@ -638,18 +738,8 @@ def main():
)
hostname = Hostname(module)
-
- changed = False
name = module.params['name']
- current_name = hostname.get_current_hostname()
- if current_name != name:
- hostname.set_current_hostname(name)
- changed = True
-
- permanent_name = hostname.get_permanent_hostname()
- if permanent_name != name:
- hostname.set_permanent_hostname(name)
- changed = True
+ changed = hostname.update_current_and_permanent_hostname()
module.exit_json(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
diff --git a/system/mount.py b/system/mount.py
index 0be9ee2416e..b8fad7747b3 100644
--- a/system/mount.py
+++ b/system/mount.py
@@ -20,135 +20,223 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import get_platform
+from ansible.module_utils.ismount import ismount
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.six import iteritems
+import os
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: mount
short_description: Control active and configured mount points
description:
- - This module controls active and configured mount points in C(/etc/fstab).
+ - This module controls active and configured mount points in C(/etc/fstab).
+author:
+ - Ansible Core Team
+ - Seth Vidal
version_added: "0.6"
options:
name:
description:
- - "path to the mount point, eg: C(/mnt/files)"
+ - Path to the mount point (e.g. C(/mnt/files))
required: true
src:
description:
- - device to be mounted on I(name). Required when C(state=present) or C(state=mounted)
+ - Device to be mounted on I(name). Required when I(state) set to
+ C(present) or C(mounted).
required: false
default: null
fstype:
description:
- - file-system type. Required when C(state=present) or C(state=mounted)
+ - Filesystem type. Required when I(state) is C(present) or C(mounted).
required: false
default: null
opts:
description:
- - mount options (see fstab(5))
+ - Mount options (see fstab(5), or vfstab(4) on Solaris).
required: false
default: null
dump:
description:
- - "dump (see fstab(5)), Note that if nulled, C(state=present) will cease to work and duplicate entries will be made with subsequent runs."
+ - Dump (see fstab(5)). Note that if set to C(null) and I(state) set to
+ C(present), it will cease to work and duplicate entries will be made
+ with subsequent runs.
+ - Has no effect on Solaris systems.
required: false
default: 0
passno:
description:
- - "passno (see fstab(5)), Note that if nulled, C(state=present) will cease to work and duplicate entries will be made with subsequent runs."
+ - Passno (see fstab(5)). Note that if set to C(null) and I(state) set to
+ C(present), it will cease to work and duplicate entries will be made
+ with subsequent runs.
+ - Deprecated on Solaris systems.
required: false
default: 0
state:
description:
- - If C(mounted) or C(unmounted), the device will be actively mounted or unmounted as needed and appropriately configured in I(fstab).
- - C(absent) and C(present) only deal with I(fstab) but will not affect current mounting.
- - If specifying C(mounted) and the mount point is not present, the mount point will be created. Similarly.
- - Specifying C(absent) will remove the mount point directory.
+ - If C(mounted) or C(unmounted), the device will be actively mounted or
+ unmounted as needed and appropriately configured in I(fstab).
+ - C(absent) and C(present) only deal with I(fstab) but will not affect
+ current mounting.
+ - If specifying C(mounted) and the mount point is not present, the mount
+ point will be created.
+ - Similarly, specifying C(absent) will remove the mount point directory.
required: true
- choices: [ "present", "absent", "mounted", "unmounted" ]
+ choices: ["present", "absent", "mounted", "unmounted"]
fstab:
description:
- - file to use instead of C(/etc/fstab). You shouldn't use that option
+ - File to use instead of C(/etc/fstab). You shouldn't use this option
unless you really know what you are doing. This might be useful if
- you need to configure mountpoints in a chroot environment.
+ you need to configure mountpoints in a chroot environment. OpenBSD
+ does not allow specifying alternate fstab files with mount so do not
+ use this on OpenBSD with any state that operates on the live filesystem.
required: false
- default: /etc/fstab
-
-author:
- - Ansible Core Team
- - Seth Vidal
+ default: /etc/fstab (/etc/vfstab on Solaris)
+ boot:
+ version_added: 2.2
+ description:
+ - Determines if the filesystem should be mounted on boot.
+ - Only applies to Solaris systems.
+ required: false
+ default: yes
+ choices: ["yes", "no"]
'''
-EXAMPLES = '''
-# Mount DVD read-only
-- mount: name=/mnt/dvd src=/dev/sr0 fstype=iso9660 opts=ro state=present
-# Mount up device by label
-- mount: name=/srv/disk src='LABEL=SOME_LABEL' fstype=ext4 state=present
-
-# Mount up device by UUID
-- mount: name=/home src='UUID=b3e48f45-f933-4c8e-a700-22a159ec9077' fstype=xfs opts=noatime state=present
+EXAMPLES = '''
+- name: Mount DVD read-only
+ mount:
+ name: /mnt/dvd
+ src: /dev/sr0
+ fstype: iso9660
+ opts: ro
+ state: present
+
+- name: Mount up device by label
+ mount:
+ name: /srv/disk
+ src: LABEL=SOME_LABEL
+ fstype: ext4
+ state: present
+
+- name: Mount up device by UUID
+ mount:
+ name: /home
+ src: UUID=b3e48f45-f933-4c8e-a700-22a159ec9077
+ fstype: xfs
+ opts: noatime
+ state: present
'''
def write_fstab(lines, dest):
-
fs_w = open(dest, 'w')
+
for l in lines:
fs_w.write(l)
fs_w.flush()
fs_w.close()
+
def _escape_fstab(v):
- """ escape space (040), ampersand (046) and backslash (134) which are invalid in fstab fields """
+ """Escape invalid characters in fstab fields.
+
+ space (040)
+ ampersand (046)
+ backslash (134)
+ """
+
if isinstance(v, int):
return v
else:
- return v.replace('\\', '\\134').replace(' ', '\\040').replace('&', '\\046')
-
-def set_mount(module, **kwargs):
- """ set/change a mount point location in fstab """
+ return(
+ v.
+ replace('\\', '\\134').
+ replace(' ', '\\040').
+ replace('&', '\\046'))
- # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab
- args = dict(
- opts = 'defaults',
- dump = '0',
- passno = '0',
- fstab = '/etc/fstab'
- )
- args.update(kwargs)
- new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n'
+def set_mount(module, args):
+ """Set/change a mount point location in fstab."""
to_write = []
exists = False
changed = False
- escaped_args = dict([(k, _escape_fstab(v)) for k, v in args.iteritems()])
+ escaped_args = dict([(k, _escape_fstab(v)) for k, v in iteritems(args)])
+ new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n'
+
+ if get_platform() == 'SunOS':
+ new_line = (
+ '%(src)s - %(name)s %(fstype)s %(passno)s %(boot)s %(opts)s\n')
+
for line in open(args['fstab'], 'r').readlines():
if not line.strip():
to_write.append(line)
+
continue
+
if line.strip().startswith('#'):
to_write.append(line)
+
continue
- if len(line.split()) != 6:
- # not sure what this is or why it is here
- # but it is not our fault so leave it be
+
+ # Check if we got a valid line for splitting
+ if (
+ get_platform() == 'SunOS' and len(line.split()) != 7 or
+ get_platform() != 'SunOS' and len(line.split()) != 6):
to_write.append(line)
+
continue
ld = {}
- ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split()
+ if get_platform() == 'SunOS':
+ (
+ ld['src'],
+ dash,
+ ld['name'],
+ ld['fstype'],
+ ld['passno'],
+ ld['boot'],
+ ld['opts']
+ ) = line.split()
+ else:
+ (
+ ld['src'],
+ ld['name'],
+ ld['fstype'],
+ ld['opts'],
+ ld['dump'],
+ ld['passno']
+ ) = line.split()
+
+ # Check if we found the correct line
if ld['name'] != escaped_args['name']:
to_write.append(line)
+
continue
- # it exists - now see if what we have is different
+ # If we got here we found a match - let's check if there is any
+ # difference
exists = True
- for t in ('src', 'fstype','opts', 'dump', 'passno'):
+ args_to_check = ('src', 'fstype', 'opts', 'dump', 'passno')
+
+ if get_platform() == 'SunOS':
+ args_to_check = ('src', 'fstype', 'passno', 'boot', 'opts')
+
+ for t in args_to_check:
if ld[t] != escaped_args[t]:
- changed = True
ld[t] = escaped_args[t]
+ changed = True
if changed:
to_write.append(new_line % ld)
@@ -165,42 +253,60 @@ def set_mount(module, **kwargs):
return (args['name'], changed)
-def unset_mount(module, **kwargs):
- """ remove a mount point from fstab """
-
- # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab
- args = dict(
- opts = 'default',
- dump = '0',
- passno = '0',
- fstab = '/etc/fstab'
- )
- args.update(kwargs)
+def unset_mount(module, args):
+ """Remove a mount point from fstab."""
to_write = []
changed = False
escaped_name = _escape_fstab(args['name'])
+
for line in open(args['fstab'], 'r').readlines():
if not line.strip():
to_write.append(line)
+
continue
+
if line.strip().startswith('#'):
to_write.append(line)
+
continue
- if len(line.split()) != 6:
- # not sure what this is or why it is here
- # but it is not our fault so leave it be
+
+ # Check if we got a valid line for splitting
+ if (
+ get_platform() == 'SunOS' and len(line.split()) != 7 or
+ get_platform() != 'SunOS' and len(line.split()) != 6):
to_write.append(line)
+
continue
ld = {}
- ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split()
+
+ if get_platform() == 'SunOS':
+ (
+ ld['src'],
+ dash,
+ ld['name'],
+ ld['fstype'],
+ ld['passno'],
+ ld['boot'],
+ ld['opts']
+ ) = line.split()
+ else:
+ (
+ ld['src'],
+ ld['name'],
+ ld['fstype'],
+ ld['opts'],
+ ld['dump'],
+ ld['passno']
+ ) = line.split()
if ld['name'] != escaped_name:
to_write.append(line)
+
continue
- # if we got here we found a match - continue and mark changed
+ # If we got here we found a match - continue and mark changed
changed = True
if changed and not module.check_mode:
@@ -208,113 +314,371 @@ def unset_mount(module, **kwargs):
return (args['name'], changed)
+def _set_fstab_args(fstab_file):
+ result = []
+ if fstab_file and fstab_file != '/etc/fstab':
+ if get_platform().lower().endswith('bsd'):
+ result.append('-F')
+ else:
+ result.append('-T')
+ result.append(fstab_file)
+ return result
-def mount(module, **kwargs):
- """ mount up a path or remount if needed """
-
- # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab
- args = dict(
- opts = 'default',
- dump = '0',
- passno = '0',
- fstab = '/etc/fstab'
- )
- args.update(kwargs)
-
- mount_bin = module.get_bin_path('mount')
-
- name = kwargs['name']
+def mount(module, args):
+ """Mount up a path or remount if needed."""
- cmd = [ mount_bin, ]
+ mount_bin = module.get_bin_path('mount', required=True)
+ name = args['name']
+ cmd = [mount_bin]
if ismount(name):
- cmd += [ '-o', 'remount', ]
+ return remount(module, mount_bin, args)
- if get_platform().lower() == 'freebsd':
- cmd += [ '-F', args['fstab'], ]
-
- if get_platform().lower() == 'linux':
- cmd += [ '-T', args['fstab'], ]
+ if get_platform().lower() == 'openbsd':
+ # Use module.params['fstab'] here as args['fstab'] has been set to the
+ # default value.
+ if module.params['fstab'] is not None:
+ module.fail_json(msg='OpenBSD does not support alternate fstab files. Do not specify the fstab parameter for OpenBSD hosts')
+ else:
+ cmd += _set_fstab_args(args['fstab'])
- cmd += [ name, ]
+ cmd += [name]
rc, out, err = module.run_command(cmd)
+
if rc == 0:
return 0, ''
else:
return rc, out+err
-def umount(module, **kwargs):
- """ unmount a path """
- umount_bin = module.get_bin_path('umount')
- name = kwargs['name']
- cmd = [umount_bin, name]
+def umount(module, dest):
+ """Unmount a path."""
+
+ umount_bin = module.get_bin_path('umount', required=True)
+ cmd = [umount_bin, dest]
rc, out, err = module.run_command(cmd)
+
if rc == 0:
return 0, ''
else:
return rc, out+err
-def main():
+def remount(module, mount_bin, args):
+ ''' will try to use -o remount first and fallback to unmount/mount if unsupported'''
+ msg = ''
+ cmd = [mount_bin]
+
+ # multiplatform remount opts
+ if get_platform().lower().endswith('bsd'):
+ cmd += ['-u']
+ else:
+ cmd += ['-o', 'remount' ]
+ if get_platform().lower() == 'openbsd':
+ # Use module.params['fstab'] here as args['fstab'] has been set to the
+ # default value.
+ if module.params['fstab'] is not None:
+ module.fail_json(msg='OpenBSD does not support alternate fstab files. Do not specify the fstab parameter for OpenBSD hosts')
+ else:
+ cmd += _set_fstab_args(args['fstab'])
+ cmd += [ args['name'], ]
+ out = err = ''
+ try:
+ if get_platform().lower().endswith('bsd'):
+ # Note: Forcing BSDs to do umount/mount due to BSD remount not
+ # working as expected (suspect bug in the BSD mount command)
+ # Interested contributor could rework this to use mount options on
+ # the CLI instead of relying on fstab
+ # https://github.com/ansible/ansible-modules-core/issues/5591
+ rc = 1
+ else:
+ rc, out, err = module.run_command(cmd)
+ except:
+ rc = 1
+
+ if rc != 0:
+ msg = out + err
+ if ismount(args['name']):
+ rc, msg = umount(module, args['name'])
+ if rc == 0:
+ rc, msg = mount(module, args)
+ return rc, msg
+
+# Note if we wanted to put this into module_utils we'd have to get permission
+# from @jupeter -- https://github.com/ansible/ansible-modules-core/pull/2923
+# @jtyr -- https://github.com/ansible/ansible-modules-core/issues/4439
+# and @abadger to relicense from GPLv3+
+def is_bind_mounted(module, linux_mounts, dest, src=None, fstype=None):
+ """Return whether the dest is bind mounted
+
+ :arg module: The AnsibleModule (used for helper functions)
+ :arg dest: The directory to be mounted under. This is the primary means
+ of identifying whether the destination is mounted.
+ :kwarg src: The source directory. If specified, this is used to help
+ ensure that we are detecting that the correct source is mounted there.
+ :kwarg fstype: The filesystem type. If specified this is also used to
+ help ensure that we are detecting the right mount.
+ :kwarg linux_mounts: Cached list of mounts for Linux.
+ :returns: True if the dest is mounted with src otherwise False.
+ """
+
+ is_mounted = False
+
+ if get_platform() == 'Linux' and linux_mounts is not None:
+ if src is None:
+ # That's for unmounted/absent
+ if dest in linux_mounts:
+ is_mounted = True
+ else:
+ # That's for mounted
+ if dest in linux_mounts and linux_mounts[dest]['src'] == src:
+ is_mounted = True
+ else:
+ bin_path = module.get_bin_path('mount', required=True)
+ cmd = '%s -l' % bin_path
+ rc, out, err = module.run_command(cmd)
+ mounts = []
+
+ if len(out):
+ mounts = to_native(out).strip().split('\n')
+
+ for mnt in mounts:
+ arguments = mnt.split()
+
+ if (
+ (arguments[0] == src or src is None) and
+ arguments[2] == dest and
+ (arguments[4] == fstype or fstype is None)):
+ is_mounted = True
+
+ if is_mounted:
+ break
+
+ return is_mounted
+
+
+def get_linux_mounts(module):
+ """Gather mount information"""
+
+ mntinfo_file = "/proc/self/mountinfo"
+
+ try:
+ f = open(mntinfo_file)
+ except IOError:
+ return
+
+ lines = map(str.strip, f.readlines())
+
+ try:
+ f.close()
+ except IOError:
+ module.fail_json(msg="Cannot close file %s" % mntinfo_file)
+
+ mntinfo = []
+
+ for line in lines:
+ fields = line.split()
+
+ record = {
+ 'root': fields[3],
+ 'dst': fields[4],
+ 'opts': fields[5],
+ 'fields': fields[6:-4],
+ 'fs': fields[-3],
+ 'src': fields[-2],
+ }
+
+ mntinfo.append(record)
+
+ mounts = {}
+
+ for i, mnt in enumerate(mntinfo):
+ src = mnt['src']
+
+ if mnt['fs'] == 'tmpfs' and mnt['root'] != '/':
+ # == Example:
+ # 65 19 0:35 / /tmp rw shared:25 - tmpfs tmpfs rw
+ # 210 65 0:35 /aaa /tmp/bbb rw shared:25 - tmpfs tmpfs rw
+ # == Expected result:
+ # src=/tmp/aaa
+ # ==
+
+ shared = None
+
+ # Search for the shared field
+ for fld in mnt['fields']:
+ if fld.startswith('shared'):
+ shared = fld
+
+ if shared is None:
+ continue
+
+ dest = None
+
+ # Search fo the record with the same field
+ for j, m in enumerate(mntinfo):
+ if j < i:
+ if shared in m['fields']:
+ dest = m['dst']
+ else:
+ break
+
+ if dest is not None:
+ src = "%s%s" % (dest, mnt['root'])
+ else:
+ continue
+
+ elif mnt['root'] != '/' and len(mnt['fields']) > 0:
+ # == Example:
+ # 67 19 8:18 / /mnt/disk2 rw shared:26 - ext4 /dev/sdb2 rw
+ # 217 65 8:18 /test /tmp/ccc rw shared:26 - ext4 /dev/sdb2 rw
+ # == Expected result:
+ # src=/mnt/disk2/test
+ # ==
+
+ # Search for parent
+ for j, m in enumerate(mntinfo):
+ if j < i:
+ if m['src'] == mnt['src']:
+ src = "%s%s" % (m['dst'], mnt['root'])
+ else:
+ break
+
+ elif mnt['root'] != '/' and len(mnt['fields']) == 0:
+ # == Example 1:
+ # 27 20 8:1 /tmp/aaa /tmp/bbb rw - ext4 /dev/sdb2 rw
+ # == Example 2:
+ # 204 136 253:2 /rootfs / rw - ext4 /dev/sdb2 rw
+ # 141 140 253:2 /rootfs/tmp/aaa /tmp/bbb rw - ext4 /dev/sdb2 rw
+ # == Expected result:
+ # src=/tmp/aaa
+ # ==
+
+ src = mnt['root']
+
+ # Search for parent
+ for j, m in enumerate(mntinfo):
+ if j < i:
+ if (
+ m['src'] == mnt['src'] and
+ mnt['root'].startswith(m['root'])):
+ src = src.replace("%s/" % m['root'], '/', 1)
+ else:
+ break
+
+ mounts[mnt['dst']] = {
+ 'src': src,
+ 'opts': mnt['opts'],
+ 'fs': mnt['fs']
+ }
+
+ return mounts
+
+
+def main():
module = AnsibleModule(
- argument_spec = dict(
- state = dict(required=True, choices=['present', 'absent', 'mounted', 'unmounted']),
- name = dict(required=True),
- opts = dict(default=None),
- passno = dict(default=None, type='str'),
- dump = dict(default=None),
- src = dict(required=False),
- fstype = dict(required=False),
- fstab = dict(default='/etc/fstab')
+ argument_spec=dict(
+ boot=dict(default='yes', choices=['yes', 'no']),
+ dump=dict(),
+ fstab=dict(default=None),
+ fstype=dict(),
+ name=dict(required=True, type='path'),
+ opts=dict(),
+ passno=dict(type='str'),
+ src=dict(type='path'),
+ state=dict(
+ required=True,
+ choices=['present', 'absent', 'mounted', 'unmounted']),
),
supports_check_mode=True,
- required_if = (
+ required_if=(
['state', 'mounted', ['src', 'fstype']],
['state', 'present', ['src', 'fstype']]
)
)
-
changed = False
- rc = 0
- args = {'name': module.params['name']}
- if module.params['src'] is not None:
- args['src'] = module.params['src']
- if module.params['fstype'] is not None:
- args['fstype'] = module.params['fstype']
- if module.params['passno'] is not None:
- args['passno'] = module.params['passno']
- if module.params['opts'] is not None:
- args['opts'] = module.params['opts']
- if module.params['dump'] is not None:
- args['dump'] = module.params['dump']
- if module.params['fstab'] is not None:
- args['fstab'] = module.params['fstab']
-
- # if fstab file does not exist, we first need to create it. This mainly
- # happens when fstab optin is passed to the module.
+ # solaris args:
+ # name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab
+ # linux args:
+ # name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab
+ # Note: Do not modify module.params['fstab'] as we need to know if the user
+ # explicitly specified it in mount() and remount()
+ if get_platform().lower() == 'sunos':
+ args = dict(
+ name=module.params['name'],
+ opts='-',
+ passno='-',
+ fstab=module.params['fstab'],
+ boot='yes'
+ )
+ if args['fstab'] is None:
+ args['fstab'] = '/etc/vfstab'
+ else:
+ args = dict(
+ name=module.params['name'],
+ opts='defaults',
+ dump='0',
+ passno='0',
+ fstab=module.params['fstab']
+ )
+ if args['fstab'] is None:
+ args['fstab'] = '/etc/fstab'
+
+ # FreeBSD doesn't have any 'default' so set 'rw' instead
+ if get_platform() == 'FreeBSD':
+ args['opts'] = 'rw'
+
+ linux_mounts = []
+
+ # Cache all mounts here in order we have consistent results if we need to
+ # call is_bind_mouted() multiple times
+ if get_platform() == 'Linux':
+ linux_mounts = get_linux_mounts(module)
+
+ if linux_mounts is None:
+ args['warnings'] = (
+ 'Cannot open file /proc/self/mountinfo. '
+ 'Bind mounts might be misinterpreted.')
+
+ # Override defaults with user specified params
+ for key in ('src', 'fstype', 'passno', 'opts', 'dump', 'fstab'):
+ if module.params[key] is not None:
+ args[key] = module.params[key]
+
+ # If fstab file does not exist, we first need to create it. This mainly
+ # happens when fstab option is passed to the module.
if not os.path.exists(args['fstab']):
if not os.path.exists(os.path.dirname(args['fstab'])):
os.makedirs(os.path.dirname(args['fstab']))
- open(args['fstab'],'a').close()
- # absent == remove from fstab and unmounted
- # unmounted == do not change fstab state, but unmount
- # present == add to fstab, do not change mount state
- # mounted == add to fstab if not there and make sure it is mounted, if it has changed in fstab then remount it
+ open(args['fstab'], 'a').close()
+
+ # absent:
+ # Remove from fstab and unmounted.
+ # unmounted:
+ # Do not change fstab state, but unmount.
+ # present:
+ # Add to fstab, do not change mount state.
+ # mounted:
+ # Add to fstab if not there and make sure it is mounted. If it has
+ # changed in fstab then remount it.
state = module.params['state']
- name = module.params['name']
+ name = module.params['name']
+
if state == 'absent':
- name, changed = unset_mount(module, **args)
+ name, changed = unset_mount(module, args)
+
if changed and not module.check_mode:
- if ismount(name):
- res,msg = umount(module, **args)
+ if ismount(name) or is_bind_mounted(module, linux_mounts, name):
+ res, msg = umount(module, name)
+
if res:
- module.fail_json(msg="Error unmounting %s: %s" % (name, msg))
+ module.fail_json(
+ msg="Error unmounting %s: %s" % (name, msg))
if os.path.exists(name):
try:
@@ -322,61 +686,55 @@ def main():
except (OSError, IOError):
e = get_exception()
module.fail_json(msg="Error rmdir %s: %s" % (name, str(e)))
-
- module.exit_json(changed=changed, **args)
-
- if state == 'unmounted':
- if ismount(name):
+ elif state == 'unmounted':
+ if ismount(name) or is_bind_mounted(module, linux_mounts, name):
if not module.check_mode:
- res,msg = umount(module, **args)
+ res, msg = umount(module, name)
+
if res:
- module.fail_json(msg="Error unmounting %s: %s" % (name, msg))
- changed = True
+ module.fail_json(
+ msg="Error unmounting %s: %s" % (name, msg))
- module.exit_json(changed=changed, **args)
+ changed = True
+ elif state == 'mounted':
+ if not os.path.exists(name) and not module.check_mode:
+ try:
+ os.makedirs(name)
+ except (OSError, IOError):
+ e = get_exception()
+ module.fail_json(
+ msg="Error making dir %s: %s" % (name, str(e)))
+
+ name, changed = set_mount(module, args)
+ res = 0
- if state in ['mounted', 'present']:
- if state == 'mounted':
- if not os.path.exists(name) and not module.check_mode:
- try:
- os.makedirs(name)
- except (OSError, IOError):
- e = get_exception()
- module.fail_json(msg="Error making dir %s: %s" % (name, str(e)))
-
- name, changed = set_mount(module, **args)
- if state == 'mounted':
- res = 0
- if ismount(name):
- if changed and not module.check_mode:
- res,msg = mount(module, **args)
- elif 'bind' in args.get('opts', []):
- changed = True
- cmd = 'mount -l'
- rc, out, err = module.run_command(cmd)
- allmounts = out.split('\n')
- for mounts in allmounts[:-1]:
- arguments = mounts.split()
- if arguments[0] == args['src'] and arguments[2] == args['name'] and arguments[4] == args['fstype']:
- changed = False
- if changed:
- res,msg = mount(module, **args)
- else:
+ if ismount(name):
+ if changed and not module.check_mode:
+ res, msg = mount(module, args)
changed = True
- if not module.check_mode:
- res,msg = mount(module, **args)
+ elif 'bind' in args.get('opts', []):
+ changed = True
+ if is_bind_mounted( module, linux_mounts, name, args['src'], args['fstype']):
+ changed = False
- if res:
- module.fail_json(msg="Error mounting %s: %s" % (name, msg))
+ if changed and not module.check_mode:
+ res, msg = mount(module, args)
+ else:
+ changed = True
+ if not module.check_mode:
+ res, msg = mount(module, args)
- module.exit_json(changed=changed, **args)
+ if res:
+ module.fail_json(msg="Error mounting %s: %s" % (name, msg))
+ elif state == 'present':
+ name, changed = set_mount(module, args)
+ else:
+ module.fail_json(msg='Unexpected position reached')
- module.fail_json(msg='Unexpected position reached')
+ module.exit_json(changed=changed, **args)
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.ismount import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/ping.py b/system/ping.py
index a701b009132..bee23a95c4a 100644
--- a/system/ping.py
+++ b/system/ping.py
@@ -20,6 +20,10 @@
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: ping
diff --git a/system/seboolean.py b/system/seboolean.py
index 1fbb83f2a7d..9246d912a07 100644
--- a/system/seboolean.py
+++ b/system/seboolean.py
@@ -17,6 +17,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: seboolean
@@ -50,7 +54,10 @@
EXAMPLES = '''
# Set (httpd_can_network_connect) flag on and keep it persistent across reboots
-- seboolean: name=httpd_can_network_connect state=yes persistent=yes
+- seboolean:
+ name: httpd_can_network_connect
+ state: yes
+ persistent: yes
'''
try:
@@ -71,7 +78,7 @@ def has_boolean_value(module, name):
rc, bools = selinux.security_get_boolean_names()
except OSError:
module.fail_json(msg="Failed to get list of boolean names")
- if name in bools:
+ if to_bytes(name) in bools:
return True
else:
return False
@@ -182,6 +189,11 @@ def main():
result = {}
result['name'] = name
+ if hasattr(selinux, 'selinux_boolean_sub'):
+ # selinux_boolean_sub allows sites to rename a boolean and alias the old name
+ # Feature only available in selinux library since 2012.
+ name = selinux.selinux_boolean_sub(name)
+
if not has_boolean_value(module, name):
module.fail_json(msg="SELinux boolean %s does not exist." % name)
@@ -210,4 +222,7 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+from ansible.module_utils._text import to_bytes
+
+if __name__ == '__main__':
+ main()
diff --git a/system/selinux.py b/system/selinux.py
index 2debb95a475..634ef4efcad 100644
--- a/system/selinux.py
+++ b/system/selinux.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: selinux
@@ -49,9 +53,19 @@
'''
EXAMPLES = '''
-- selinux: policy=targeted state=enforcing
-- selinux: policy=targeted state=permissive
-- selinux: state=disabled
+# Enable SELinux
+- selinux:
+ policy: targeted
+ state: enforcing
+
+# Put SELinux in permissive mode, logging actions that would be blocked.
+- selinux:
+ policy: targeted
+ state: permissive
+
+# Disable SELinux
+- selinux:
+ state: disabled
'''
import os
diff --git a/system/service.py b/system/service.py
index e8f50172a6e..b1cb289b49f 100644
--- a/system/service.py
+++ b/system/service.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: service
@@ -74,29 +78,51 @@
description:
- Additional arguments provided on the command line
aliases: [ 'args' ]
+ use:
+ description:
+          - The service module actually uses system-specific modules, normally through auto detection; this setting can force a specific module.
+ - Normally it uses the value of the 'ansible_service_mgr' fact and falls back to the old 'service' module when none matching is found.
+ default: 'auto'
+ version_added: 2.2
'''
EXAMPLES = '''
# Example action to start service httpd, if not running
-- service: name=httpd state=started
+- service:
+ name: httpd
+ state: started
# Example action to stop service httpd, if running
-- service: name=httpd state=stopped
+- service:
+ name: httpd
+ state: stopped
# Example action to restart service httpd, in all cases
-- service: name=httpd state=restarted
+- service:
+ name: httpd
+ state: restarted
# Example action to reload service httpd, in all cases
-- service: name=httpd state=reloaded
+- service:
+ name: httpd
+ state: reloaded
# Example action to enable service httpd, and not touch the running state
-- service: name=httpd enabled=yes
+- service:
+ name: httpd
+ enabled: yes
# Example action to start service foo, based on running process /usr/bin/foo
-- service: name=foo pattern=/usr/bin/foo state=started
+- service:
+ name: foo
+ pattern: /usr/bin/foo
+ state: started
# Example action to restart network service for interface eth0
-- service: name=network state=restarted args=eth0
+- service:
+ name: network
+ state: restarted
+ args: eth0
'''
@@ -109,6 +135,7 @@
import time
import string
import glob
+from ansible.module_utils.service import fail_if_missing
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
@@ -471,7 +498,7 @@ def check_systemd():
self.enable_cmd = location['chkconfig']
if self.enable_cmd is None:
- self.module.fail_json(msg="no service or tool found for: %s" % self.name)
+ fail_if_missing(self.module, False, self.name, msg='host')
# If no service control tool selected yet, try to see if 'service' is available
if self.svc_cmd is None and location.get('service', False):
@@ -988,7 +1015,7 @@ def service_enable(self):
# and hope for the best.
for rcvar in rcvars:
if '=' in rcvar:
- self.rcconf_key = rcvar.split('=')[0]
+ self.rcconf_key, default_rcconf_value = rcvar.split('=', 1)
break
if self.rcconf_key is None:
@@ -997,8 +1024,10 @@ def service_enable(self):
if self.sysrc_cmd: # FreeBSD >= 9.2
rc, current_rcconf_value, stderr = self.execute_command("%s -n %s" % (self.sysrc_cmd, self.rcconf_key))
+            # it can happen that rcvar is not set (e.g. on a system installed from the ports collection),
+            # so we will fall back to the default
if rc != 0:
- self.module.fail_json(msg="unable to get current rcvar value", stdout=stdout, stderr=stderr)
+ current_rcconf_value = default_rcconf_value
if current_rcconf_value.strip().upper() != self.rcconf_value:
@@ -1350,7 +1379,7 @@ def service_enable(self):
def service_control(self):
status = self.get_sunos_svcs_status()
- # if starting or reloading, clear maintenace states
+ # if starting or reloading, clear maintenance states
if self.action in ['start', 'reload', 'restart'] and status in ['maintenance', 'degraded']:
rc, stdout, stderr = self.execute_command("%s clear %s" % (self.svcadm_cmd, self.name))
if rc != 0:
@@ -1365,9 +1394,9 @@ def service_control(self):
elif self.action == 'stop':
subcmd = "disable -st"
elif self.action == 'reload':
- subcmd = "refresh"
+ subcmd = "refresh -s"
elif self.action == 'restart' and status == 'online':
- subcmd = "restart"
+ subcmd = "restart -s"
elif self.action == 'restart' and status != 'online':
subcmd = "enable -rst"
@@ -1462,10 +1491,9 @@ def main():
runlevel = dict(required=False, default='default'),
arguments = dict(aliases=['args'], default=''),
),
- supports_check_mode=True
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled']],
)
- if module.params['state'] is None and module.params['enabled'] is None:
- module.fail_json(msg="Neither 'state' nor 'enabled' set")
service = Service(module)
@@ -1544,4 +1572,5 @@ def main():
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/setup.py b/system/setup.py
index b0ad9c0aa45..81bbf43ddba 100644
--- a/system/setup.py
+++ b/system/setup.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: setup
@@ -119,7 +123,7 @@ def main():
gather_subset=dict(default=["all"], required=False, type='list'),
gather_timeout=dict(default=10, required=False, type='int'),
filter=dict(default="*", required=False),
- fact_path=dict(default='/etc/ansible/facts.d', required=False),
+ fact_path=dict(default='/etc/ansible/facts.d', required=False, type='path'),
),
supports_check_mode = True,
)
diff --git a/system/sysctl.py b/system/sysctl.py
index 3df8e1fef88..43312b0922e 100644
--- a/system/sysctl.py
+++ b/system/sysctl.py
@@ -20,6 +20,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: sysctl
@@ -76,25 +80,37 @@
EXAMPLES = '''
# Set vm.swappiness to 5 in /etc/sysctl.conf
-- sysctl:
- name: vm.swappiness
+- sysctl:
+ name: vm.swappiness
value: 5
state: present
# Remove kernel.panic entry from /etc/sysctl.conf
- sysctl:
name: kernel.panic
- state: absent
+ state: absent
sysctl_file: /etc/sysctl.conf
# Set kernel.panic to 3 in /tmp/test_sysctl.conf
-- sysctl: name=kernel.panic value=3 sysctl_file=/tmp/test_sysctl.conf reload=no
+- sysctl:
+ name: kernel.panic
+ value: 3
+ sysctl_file: /tmp/test_sysctl.conf
+ reload: no
# Set ip forwarding on in /proc and do not reload the sysctl file
-- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes
+- sysctl:
+ name: net.ipv4.ip_forward
+ value: 1
+ sysctl_set: yes
# Set ip forwarding on in /proc and in the sysctl file and reload if necessary
-- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes
+- sysctl:
+ name: net.ipv4.ip_forward
+ value: 1
+ sysctl_set: yes
+ state: present
+ reload: yes
'''
# ==============================================================
diff --git a/system/systemd.py b/system/systemd.py
index 3c83e3f69b7..0b6c05351bc 100644
--- a/system/systemd.py
+++ b/system/systemd.py
@@ -17,6 +17,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
module: systemd
author:
@@ -57,6 +61,13 @@
description:
- run daemon-reload before doing any other operations, to make sure systemd has read any changes.
aliases: ['daemon-reload']
+ user:
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+ description:
+ - run systemctl talking to the service manager of the calling user, rather than the service manager
+ of the system.
notes:
- One option other than name is required.
requirements:
@@ -66,17 +77,27 @@
EXAMPLES = '''
# Example action to start service httpd, if not running
- systemd: state=started name=httpd
+
# Example action to stop service cron on debian, if running
- systemd: name=cron state=stopped
-# Example action to restart service cron on centos, in all cases, also issue deamon-reload to pick up config changes
-- systemd: state=restarted daemon_reload: yes name=crond
+
+# Example action to restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
+- systemd:
+ state: restarted
+ daemon_reload: yes
+ name: crond
+
# Example action to reload service httpd, in all cases
-- systemd: name=httpd state=reloaded
+- systemd:
+ name: httpd
+ state: reloaded
+
# Example action to enable service httpd and ensure it is not masked
- systemd:
name: httpd
enabled: yes
masked: no
+
# Example action to enable a timer for dnf-automatic
- systemd:
name: dnf-automatic.timer
@@ -214,15 +235,15 @@
}
'''
-import os
-import glob
from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
+from ansible.module_utils._text import to_native
# ===========================================
# Main control flow
def main():
- # init
+ # initialize
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, type='str', aliases=['unit', 'service']),
@@ -230,13 +251,15 @@ def main():
enabled = dict(type='bool'),
masked = dict(type='bool'),
daemon_reload= dict(type='bool', default=False, aliases=['daemon-reload']),
+ user= dict(type='bool', default=False),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']],
)
- # initialize
systemctl = module.get_bin_path('systemctl')
+ if module.params['user']:
+ systemctl = systemctl + " --user"
unit = module.params['name']
rc = 0
out = err = ''
@@ -244,6 +267,7 @@ def main():
'name': unit,
'changed': False,
'status': {},
+ 'warnings': [],
}
# Run daemon-reload first, if requested
@@ -252,44 +276,52 @@ def main():
if rc != 0:
module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
- #TODO: check if service exists
+ found = False
+ is_initd = sysv_exists(unit)
+ is_systemd = False
+
+ # check service data, cannot error out on rc as it changes across versions, assume not found
(rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
- if rc != 0:
- module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, unit, err))
-
- # load return of systemctl show into dictionary for easy access and return
- k = None
- multival = []
- for line in out.split('\n'): # systemd can have multiline values delimited with {}
- if line.strip():
- if k is None:
- if '=' in line:
- k,v = line.split('=', 1)
- if v.lstrip().startswith('{'):
- if not v.rstrip().endswith('}'):
+ if rc == 0:
+ # load return of systemctl show into dictionary for easy access and return
+ multival = []
+ if out:
+ k = None
+ for line in to_native(out).split('\n'): # systemd can have multiline values delimited with {}
+ if line.strip():
+ if k is None:
+ if '=' in line:
+ k,v = line.split('=', 1)
+ if v.lstrip().startswith('{'):
+ if not v.rstrip().endswith('}'):
+ multival.append(line)
+ continue
+ result['status'][k] = v.strip()
+ k = None
+ else:
+ if line.rstrip().endswith('}'):
+ result['status'][k] = '\n'.join(multival).strip()
+ multival = []
+ k = None
+ else:
multival.append(line)
- continue
- result['status'][k] = v.strip()
- k = None
- else:
- if line.rstrip().endswith('}'):
- result['status'][k] = '\n'.join(multival).strip()
- multival = []
- k = None
- else:
- multival.append(line)
+ is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
- if 'LoadState' in result['status'] and result['status']['LoadState'] == 'not-found':
- module.fail_json(msg='Could not find the requested service "%r": %s' % (unit, err))
- elif 'LoadError' in result['status']:
- module.fail_json(msg="Failed to get the service status '%s': %s" % (unit, result['status']['LoadError']))
+ # Check for loading error
+ if is_systemd and 'LoadError' in result['status']:
+ module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
- # mask/unmask the service, if requested
+ # Does service exist?
+ found = is_systemd or is_initd
+ if is_initd and not is_systemd:
+ result['warnings'].append('The service (%s) is actually an init script but the system is managed by systemd' % unit)
+
+ # mask/unmask the service, if requested, can operate on services before they are installed
if module.params['masked'] is not None:
- masked = (result['status']['LoadState'] == 'masked')
+ # state is not masked unless systemd affirms otherwise
+ masked = ('LoadState' in result['status'] and result['status']['LoadState'] == 'masked')
- # Change?
if masked != module.params['masked']:
result['changed'] = True
if module.params['masked']:
@@ -300,10 +332,20 @@ def main():
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
- module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
+                # some versions of systemd CAN mask/unmask non-existing services; we only fail on missing if they don't
+ fail_if_missing(module, found, unit, msg='host')
+
# Enable/disable service startup at boot if requested
if module.params['enabled'] is not None:
+
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ fail_if_missing(module, found, unit, msg='host')
+
# do we need to enable the service?
enabled = False
(rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
@@ -312,11 +354,8 @@ def main():
if rc == 0:
enabled = True
elif rc == 1:
- # Deals with init scripts
# if both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
- initscript = '/etc/init.d/' + unit
- if os.path.exists(initscript) and os.access(initscript, os.X_OK) and \
- (not out.startswith('disabled') or bool(glob.glob('/etc/rc?.d/S??' + unit))):
+ if is_initd and (not out.startswith('disabled') or sysv_is_enabled(unit)):
enabled = True
# default to current state
@@ -325,19 +364,16 @@ def main():
# Change enable/disable if needed
if enabled != module.params['enabled']:
result['changed'] = True
- if module.params['enabled']:
- action = 'enable'
- else:
- action = 'disable'
-
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
- module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
result['enabled'] = not enabled
+ # set service state if requested
if module.params['state'] is not None:
+ fail_if_missing(module, found, unit, msg="host")
# default to desired state
result['state'] = module.params['state']
@@ -348,17 +384,18 @@ def main():
if module.params['state'] == 'started':
if result['status']['ActiveState'] != 'active':
action = 'start'
- result['changed'] = True
elif module.params['state'] == 'stopped':
if result['status']['ActiveState'] == 'active':
action = 'stop'
- result['changed'] = True
else:
- action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
+ if result['status']['ActiveState'] != 'active':
+ action = 'start'
+ else:
+ action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
result['state'] = 'started'
- result['changed'] = True
if action:
+ result['changed'] = True
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
if rc != 0:
diff --git a/system/user.py b/system/user.py
index dbf3f3dc865..ed5503583ae 100644
--- a/system/user.py
+++ b/system/user.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: user
@@ -61,9 +65,10 @@
groups:
required: false
description:
- - Puts the user in this comma-delimited list of groups. When set to
- the empty string ('groups='), the user is removed from all groups
- except the primary group.
+ - Puts the user in list of groups. When set to the empty string ('groups='),
+ the user is removed from all groups except the primary group.
+        - Before version 2.3, the only input format allowed was a comma-separated string;
+          now YAML lists are also accepted.
append:
required: false
default: "no"
@@ -159,7 +164,7 @@
default: rsa
version_added: "0.9"
description:
- - Optionally specify the type of SSH key to generate.
+ - Optionally specify the type of SSH key to generate.
Available SSH key types will depend on implementation
present on target host.
ssh_key_file:
@@ -200,19 +205,38 @@
EXAMPLES = '''
# Add the user 'johnd' with a specific uid and a primary group of 'admin'
-- user: name=johnd comment="John Doe" uid=1040 group=admin
+- user:
+ name: johnd
+ comment: "John Doe"
+ uid: 1040
+ group: admin
# Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups
-- user: name=james shell=/bin/bash groups=admins,developers append=yes
+- user:
+ name: james
+ shell: /bin/bash
+ groups: admins,developers
+ append: yes
# Remove the user 'johnd'
-- user: name=johnd state=absent remove=yes
+- user:
+ name: johnd
+ state: absent
+ remove: yes
# Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
-- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa
+- user:
+ name: jsmith
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_file: .ssh/id_rsa
# added a consultant whose account you want to expire
-- user: name=james18 shell=/bin/zsh groups=developers expires=1422403387
+- user:
+ name: james18
+ shell: /bin/zsh
+ groups: developers
+ expires: 1422403387
'''
import os
@@ -221,6 +245,7 @@
import platform
import socket
import time
+from ansible.module_utils._text import to_native
try:
import spwd
@@ -261,7 +286,6 @@ def __init__(self, module):
self.non_unique = module.params['non_unique']
self.seuser = module.params['seuser']
self.group = module.params['group']
- self.groups = module.params['groups']
self.comment = module.params['comment']
self.shell = module.params['shell']
self.password = module.params['password']
@@ -281,6 +305,10 @@ def __init__(self, module):
self.update_password = module.params['update_password']
self.home = module.params['home']
self.expires = None
+ self.groups = None
+
+ if module.params['groups'] is not None:
+ self.groups = ','.join(module.params['groups'])
if module.params['expires']:
try:
@@ -402,7 +430,7 @@ def _check_usermod_append(self):
helpout = data1 + data2
# check if --append exists
- lines = helpout.split('\n')
+ lines = to_native(helpout).split('\n')
for line in lines:
if line.strip().startswith('-a, --append'):
return True
@@ -1237,6 +1265,29 @@ class SunOS(User):
distribution = None
SHADOWFILE = '/etc/shadow'
+ def get_password_defaults(self):
+ # Read password aging defaults
+ try:
+ minweeks = ''
+ maxweeks = ''
+ warnweeks = ''
+ for line in open("/etc/default/passwd", 'r'):
+ line = line.strip()
+ if (line.startswith('#') or line == ''):
+ continue
+ key, value = line.split('=')
+ if key == "MINWEEKS":
+ minweeks = value.rstrip('\n')
+ elif key == "MAXWEEKS":
+ maxweeks = value.rstrip('\n')
+ elif key == "WARNWEEKS":
+ warnweeks = value.rstrip('\n')
+ except Exception:
+ err = get_exception()
+ self.module.fail_json(msg="failed to read /etc/default/passwd: %s" % str(err))
+
+ return (minweeks, maxweeks, warnweeks)
+
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
@@ -1294,6 +1345,7 @@ def create_user(self):
if not self.module.check_mode:
# we have to set the password by editing the /etc/shadow file
if self.password is not None:
+ minweeks, maxweeks, warnweeks = self.get_password_defaults()
try:
lines = []
for line in open(self.SHADOWFILE, 'rb').readlines():
@@ -1303,6 +1355,12 @@ def create_user(self):
continue
fields[1] = self.password
fields[2] = str(int(time.time() / 86400))
+ if minweeks:
+ fields[3] = str(int(minweeks) * 7)
+ if maxweeks:
+ fields[4] = str(int(maxweeks) * 7)
+ if warnweeks:
+ fields[5] = str(int(warnweeks) * 7)
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
@@ -1381,6 +1439,7 @@ def modify_user_usermod(self):
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
(rc, out, err) = (0, '', '')
if not self.module.check_mode:
+ minweeks, maxweeks, warnweeks = self.get_password_defaults()
try:
lines = []
for line in open(self.SHADOWFILE, 'rb').readlines():
@@ -1390,6 +1449,12 @@ def modify_user_usermod(self):
continue
fields[1] = self.password
fields[2] = str(int(time.time() / 86400))
+ if minweeks:
+ fields[3] = str(int(minweeks) * 7)
+ if maxweeks:
+ fields[4] = str(int(maxweeks) * 7)
+ if warnweeks:
+ fields[5] = str(int(warnweeks) * 7)
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
@@ -1490,7 +1555,7 @@ def _get_next_uid(self):
def _change_user_password(self):
'''Change password for SELF.NAME against SELF.PASSWORD.
- Please note that password must be cleatext.
+ Please note that password must be cleartext.
'''
# some documentation on how is stored passwords on OSX:
# http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
@@ -1522,7 +1587,7 @@ def _make_group_numerical(self):
def __modify_group(self, group, action):
'''Add or remove SELF.NAME to or from GROUP depending on ACTION.
- ACTION can be 'add' or 'remove' otherwhise 'remove' is assumed. '''
+ ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. '''
if action == 'add':
option = '-a'
else:
@@ -1536,7 +1601,7 @@ def __modify_group(self, group, action):
def _modify_group(self):
'''Add or remove SELF.NAME to or from GROUP depending on ACTION.
- ACTION can be 'add' or 'remove' otherwhise 'remove' is assumed. '''
+ ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. '''
rc = 0
out = ''
@@ -1655,7 +1720,7 @@ def create_user(self, command_name='dscl'):
self.chown_homedir(int(self.uid), int(self.group), self.home)
for field in self.fields:
- if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]:
+ if field[0] in self.__dict__ and self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += [ '-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
@@ -1692,7 +1757,7 @@ def modify_user(self):
self._make_group_numerical()
for field in self.fields:
- if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]:
+ if field[0] in self.__dict__ and self.__dict__[field[0]]:
current = self._get_user_property(field[1])
if current is None or current != self.__dict__[field[0]]:
cmd = self._get_dscl()
@@ -2042,7 +2107,7 @@ def main():
uid=dict(default=None, type='str'),
non_unique=dict(default='no', type='bool'),
group=dict(default=None, type='str'),
- groups=dict(default=None, type='str'),
+ groups=dict(default=None, type='list'),
comment=dict(default=None, type='str'),
home=dict(default=None, type='path'),
shell=dict(default=None, type='str'),
diff --git a/test-docs.sh b/test-docs.sh
deleted file mode 100755
index 76297fbada6..00000000000
--- a/test-docs.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh
-set -x
-
-CHECKOUT_DIR=".ansible-checkout"
-MOD_REPO="$1"
-
-# Hidden file to avoid the module_formatter recursing into the checkout
-git clone https://github.com/ansible/ansible "$CHECKOUT_DIR"
-cd "$CHECKOUT_DIR"
-git submodule update --init
-rm -rf "lib/ansible/modules/$MOD_REPO"
-ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO"
-
-pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx
-
-. ./hacking/env-setup
-PAGER=/bin/cat bin/ansible-doc -l
-if [ $? -ne 0 ] ; then
- exit $?
-fi
-make -C docsite
diff --git a/test-requirements.txt b/test-requirements.txt
deleted file mode 100644
index 93253de97a3..00000000000
--- a/test-requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-mock
-pytest
diff --git a/test/unit/cloud/openstack/test_os_server.py b/test/unit/cloud/openstack/test_os_server.py
deleted file mode 100644
index bb1f79ad2f9..00000000000
--- a/test/unit/cloud/openstack/test_os_server.py
+++ /dev/null
@@ -1,221 +0,0 @@
-import mock
-import pytest
-import yaml
-import inspect
-import collections
-
-from cloud.openstack import os_server
-
-
-class AnsibleFail(Exception):
- pass
-
-
-class AnsibleExit(Exception):
- pass
-
-
-def params_from_doc(func):
- '''This function extracts the docstring from the specified function,
- parses it as a YAML document, and returns parameters for the os_server
- module.'''
-
- doc = inspect.getdoc(func)
- cfg = yaml.load(doc)
-
- for task in cfg:
- for module, params in task.items():
- for k, v in params.items():
- if k in ['nics'] and type(v) == str:
- params[k] = [v]
- task[module] = collections.defaultdict(str,
- params)
-
- return cfg[0]['os_server']
-
-
-class FakeCloud (object):
- ports = [
- {'name': 'port1', 'id': '1234'},
- {'name': 'port2', 'id': '4321'},
- ]
-
- networks = [
- {'name': 'network1', 'id': '5678'},
- {'name': 'network2', 'id': '8765'},
- ]
-
- images = [
- {'name': 'cirros', 'id': '1'},
- {'name': 'fedora', 'id': '2'},
- ]
-
- flavors = [
- {'name': 'm1.small', 'id': '1', 'flavor_ram': 1024},
- {'name': 'm1.tiny', 'id': '2', 'flavor_ram': 512},
- ]
-
- def _find(self, source, name):
- for item in source:
- if item['name'] == name or item['id'] == name:
- return item
-
- def get_image_id(self, name, exclude=None):
- image = self._find(self.images, name)
- if image:
- return image['id']
-
- def get_flavor(self, name):
- return self._find(self.flavors, name)
-
- def get_flavor_by_ram(self, ram, include=None):
- for flavor in self.flavors:
- if flavor['ram'] >= ram and (include is None or include in
- flavor['name']):
- return flavor
-
- def get_port(self, name):
- return self._find(self.ports, name)
-
- def get_network(self, name):
- return self._find(self.networks, name)
-
- create_server = mock.MagicMock()
-
-
-class TestNetworkArgs(object):
- '''This class exercises the _network_args function of the
- os_server module. For each test, we parse the YAML document
- contained in the docstring to retrieve the module parameters for the
- test.'''
-
- def setup_method(self, method):
- self.cloud = FakeCloud()
- self.module = mock.MagicMock()
- self.module.params = params_from_doc(method)
-
- def test_nics_string_net_id(self):
- '''
- - os_server:
- nics: net-id=1234
- '''
- args = os_server._network_args(self.module, self.cloud)
- assert(args[0]['net-id'] == '1234')
-
- def test_nics_string_net_id_list(self):
- '''
- - os_server:
- nics: net-id=1234,net-id=4321
- '''
- args = os_server._network_args(self.module, self.cloud)
- assert(args[0]['net-id'] == '1234')
- assert(args[1]['net-id'] == '4321')
-
- def test_nics_string_port_id(self):
- '''
- - os_server:
- nics: port-id=1234
- '''
- args = os_server._network_args(self.module, self.cloud)
- assert(args[0]['port-id'] == '1234')
-
- def test_nics_string_net_name(self):
- '''
- - os_server:
- nics: net-name=network1
- '''
- args = os_server._network_args(self.module, self.cloud)
- assert(args[0]['net-id'] == '5678')
-
- def test_nics_string_port_name(self):
- '''
- - os_server:
- nics: port-name=port1
- '''
- args = os_server._network_args(self.module, self.cloud)
- assert(args[0]['port-id'] == '1234')
-
- def test_nics_structured_net_id(self):
- '''
- - os_server:
- nics:
- - net-id: '1234'
- '''
- args = os_server._network_args(self.module, self.cloud)
- assert(args[0]['net-id'] == '1234')
-
- def test_nics_structured_mixed(self):
- '''
- - os_server:
- nics:
- - net-id: '1234'
- - port-name: port1
- - 'net-name=network1,port-id=4321'
- '''
- args = os_server._network_args(self.module, self.cloud)
- assert(args[0]['net-id'] == '1234')
- assert(args[1]['port-id'] == '1234')
- assert(args[2]['net-id'] == '5678')
- assert(args[3]['port-id'] == '4321')
-
-
-class TestCreateServer(object):
- def setup_method(self, method):
- self.cloud = FakeCloud()
- self.module = mock.MagicMock()
- self.module.params = params_from_doc(method)
- self.module.fail_json.side_effect = AnsibleFail()
- self.module.exit_json.side_effect = AnsibleExit()
-
- self.meta = mock.MagicMock()
- self.meta.gett_hostvars_from_server.return_value = {
- 'id': '1234'
- }
- os_server.meta = self.meta
-
- def test_create_server(self):
- '''
- - os_server:
- image: cirros
- flavor: m1.tiny
- nics:
- - net-name: network1
- '''
- with pytest.raises(AnsibleExit):
- os_server._create_server(self.module, self.cloud)
-
- assert(self.cloud.create_server.call_count == 1)
- assert(self.cloud.create_server.call_args[1]['image']
- == self.cloud.get_image_id('cirros'))
- assert(self.cloud.create_server.call_args[1]['flavor']
- == self.cloud.get_flavor('m1.tiny')['id'])
- assert(self.cloud.create_server.call_args[1]['nics'][0]['net-id']
- == self.cloud.get_network('network1')['id'])
-
- def test_create_server_bad_flavor(self):
- '''
- - os_server:
- image: cirros
- flavor: missing_flavor
- nics:
- - net-name: network1
- '''
- with pytest.raises(AnsibleFail):
- os_server._create_server(self.module, self.cloud)
-
- assert('missing_flavor' in
- self.module.fail_json.call_args[1]['msg'])
-
- def test_create_server_bad_nic(self):
- '''
- - os_server:
- image: cirros
- flavor: m1.tiny
- nics:
- - net-name: missing_network
- '''
- with pytest.raises(AnsibleFail):
- os_server._create_server(self.module, self.cloud)
-
- assert('missing_network' in
- self.module.fail_json.call_args[1]['msg'])
diff --git a/test/utils/shippable/docs-requirements.txt b/test/utils/shippable/docs-requirements.txt
new file mode 100644
index 00000000000..4e859bb8c71
--- /dev/null
+++ b/test/utils/shippable/docs-requirements.txt
@@ -0,0 +1,2 @@
+jinja2
+pyyaml
diff --git a/test/utils/shippable/docs.sh b/test/utils/shippable/docs.sh
new file mode 100755
index 00000000000..2858f87c997
--- /dev/null
+++ b/test/utils/shippable/docs.sh
@@ -0,0 +1,62 @@
+#!/bin/bash -eux
+
+set -o pipefail
+
+ansible_repo_url="https://github.com/ansible/ansible.git"
+
+build_dir="${SHIPPABLE_BUILD_DIR}"
+repo="${REPO_NAME}"
+
+case "${repo}" in
+ "ansible-modules-core")
+ this_module_group="core"
+ other_module_group="extras"
+ ;;
+ "ansible-modules-extras")
+ this_module_group="extras"
+ other_module_group="core"
+ ;;
+ *)
+ echo "Unsupported repo name: ${repo}"
+ exit 1
+ ;;
+esac
+
+modules_tmp_dir="${build_dir}.tmp"
+this_modules_dir="${build_dir}/lib/ansible/modules/${this_module_group}"
+other_modules_dir="${build_dir}/lib/ansible/modules/${other_module_group}"
+
+cd /
+mv "${build_dir}" "${modules_tmp_dir}"
+git clone "${ansible_repo_url}" "${build_dir}"
+cd "${build_dir}"
+rmdir "${this_modules_dir}"
+mv "${modules_tmp_dir}" "${this_modules_dir}"
+mv "${this_modules_dir}/shippable" "${build_dir}"
+git submodule init "${other_modules_dir}"
+git submodule sync "${other_modules_dir}"
+git submodule update "${other_modules_dir}"
+
+pip install -r lib/ansible/modules/${this_module_group}/test/utils/shippable/docs-requirements.txt --upgrade
+pip list
+
+source hacking/env-setup
+
+docs_status=0
+
+PAGER=/bin/cat \
+ ANSIBLE_DEPRECATION_WARNINGS=false \
+ bin/ansible-doc -l \
+ 2>/tmp/ansible-doc.err || docs_status=$?
+
+if [ -s /tmp/ansible-doc.err ]; then
+ # report warnings as errors
+ echo "Output from 'ansible-doc -l' on stderr is considered an error:"
+ cat /tmp/ansible-doc.err
+ exit 1
+fi
+
+if [ "${docs_status}" -ne 0 ]; then
+ echo "Running 'ansible-doc -l' failed with no output on stderr and exit code: ${docs_status}"
+ exit 1
+fi
diff --git a/test/utils/shippable/integration.sh b/test/utils/shippable/integration.sh
index ee16e765c15..cf10e681bfb 100755
--- a/test/utils/shippable/integration.sh
+++ b/test/utils/shippable/integration.sh
@@ -10,7 +10,7 @@ repo="${REPO_NAME}"
if [ "${is_pr}" != "true" ]; then
echo "Module integration tests are only supported on pull requests."
- exit 1
+ exit 0
fi
case "${repo}" in
diff --git a/test/utils/shippable/sanity-skip-python24.txt b/test/utils/shippable/sanity-skip-python24.txt
new file mode 100644
index 00000000000..1a0a28c4d49
--- /dev/null
+++ b/test/utils/shippable/sanity-skip-python24.txt
@@ -0,0 +1 @@
+/cloud/[^/]+/(?!(ec2_facts.py|_ec2_ami_search.py))
diff --git a/test/utils/shippable/sanity.sh b/test/utils/shippable/sanity.sh
new file mode 100755
index 00000000000..8c1453022e7
--- /dev/null
+++ b/test/utils/shippable/sanity.sh
@@ -0,0 +1,41 @@
+#!/bin/bash -eux
+
+source_root=$(python -c "from os import path; print(path.abspath(path.join(path.dirname('$0'), '../../..')))")
+
+install_deps="${INSTALL_DEPS:-}"
+
+cd "${source_root}"
+
+# FIXME REPOMERGE: No need to checkout ansible
+build_dir=$(mktemp -d)
+trap 'rm -rf "${build_dir}"' EXIT
+
+git clone "https://github.com/ansible/ansible.git" "${build_dir}" --recursive
+source "${build_dir}/hacking/env-setup"
+# REPOMERGE: END
+
+if [ "${install_deps}" != "" ]; then
+ add-apt-repository ppa:fkrull/deadsnakes
+ apt-add-repository 'deb http://archive.ubuntu.com/ubuntu trusty-backports universe'
+ apt-get update -qq
+
+ apt-get install -qq shellcheck python2.4
+
+ # Install dependencies for ansible and validate_modules
+ pip install -r "${build_dir}/test/utils/shippable/sanity-requirements.txt" --upgrade
+ pip list
+
+fi
+
+validate_modules="${build_dir}/test/sanity/validate-modules/validate-modules"
+
+python2.4 -m compileall -fq -x "($(printf %s "$(< "test/utils/shippable/sanity-skip-python24.txt"))" | tr '\n' '|')" .
+python2.6 -m compileall -fq .
+python2.7 -m compileall -fq .
+python3.5 -m compileall -fq .
+
+ANSIBLE_DEPRECATION_WARNINGS=false \
+ "${validate_modules}" --exclude '/utilities/|/shippable(/|$)' .
+
+shellcheck \
+ test/utils/shippable/*.sh
diff --git a/utilities/helper/_accelerate.py b/utilities/helper/_accelerate.py
index 13d0510f0ce..5bd38931f22 100644
--- a/utilities/helper/_accelerate.py
+++ b/utilities/helper/_accelerate.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: accelerate
@@ -255,7 +259,7 @@ def run(self):
conn.sendall("EXISTS\n")
# update the last event time so the server doesn't
- # shutdown sooner than expected for new cliets
+ # shutdown sooner than expected for new clients
try:
self.server.last_event_lock.acquire()
self.server.last_event = datetime.datetime.now()
@@ -738,4 +742,5 @@ def main():
# try to start up the daemon
daemonize(module, password, port, timeout, minutes, ipv6, pid_file)
-main()
+if __name__ == '__main__':
+ main()
diff --git a/utilities/helper/_fireball.py b/utilities/helper/_fireball.py
index ba0770d7839..d3bc837fa4f 100644
--- a/utilities/helper/_fireball.py
+++ b/utilities/helper/_fireball.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: fireball
diff --git a/utilities/helper/meta.py b/utilities/helper/meta.py
index 6735bbc8e98..a89bfc5d394 100644
--- a/utilities/helper/meta.py
+++ b/utilities/helper/meta.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
module: meta
short_description: Execute Ansible 'actions'
@@ -35,30 +39,40 @@
- "C(noop) (added in 2.0) This literally does 'nothing'. It is mainly used internally and not recommended for general use."
- "C(clear_facts) (added in 2.1) causes the gathered facts for the hosts specified in the play's list of hosts to be cleared, including the fact cache."
- "C(clear_host_errors) (added in 2.1) clears the failed state (if any) from hosts specified in the play's list of hosts."
- choices: ['noop', 'flush_handlers', 'refresh_inventory', 'clear_facts', 'clear_host_errors']
+ - "C(end_play) (added in 2.2) causes the play to end without failing the host."
+ choices: ['noop', 'flush_handlers', 'refresh_inventory', 'clear_facts', 'clear_host_errors', 'end_play']
required: true
default: null
notes:
- - meta is not really a module nor action_plugin as such it cannot be overriden.
+ - meta is not really a module nor action_plugin as such it cannot be overwritten.
author:
- "Ansible Core Team"
'''
EXAMPLES = '''
# force all notified handlers to run at this point, not waiting for normal sync points
-- template: src=new.j2 dest=/etc/config.txt
+- template:
+ src: new.j2
+ dest: /etc/config.txt
notify: myhandler
- meta: flush_handlers
# reload inventory, useful with dynamic inventories when play makes changes to the existing hosts
-- cloud_guest: name=newhost state=present # this is fake module
-- meta: refresh_inventory
+- cloud_guest: # this is fake module
+ name: newhost
+ state: present
+
+- name: Refresh inventory to ensure new instances exist in inventory
+ meta: refresh_inventory
-# clear gathered facts from all currently targeted hosts
-- meta: clear_facts
+- name: Clear gathered facts from all currently targeted hosts
+ meta: clear_facts
# bring host back to play after failure
-- copy: src=file dest=/etc/file
+- copy:
+ src: file
+ dest: /etc/file
remote_user: imightnothavepermission
+
- meta: clear_host_errors
'''
diff --git a/utilities/logic/assert.py b/utilities/logic/assert.py
index e9e359f421a..875fc6e8565 100644
--- a/utilities/logic/assert.py
+++ b/utilities/logic/assert.py
@@ -18,12 +18,16 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: assert
-short_description: Fail with custom message
+short_description: Asserts given expressions are true
description:
- - This module asserts that a given expression is true and can be a simpler alternative to the 'fail' module in some cases.
+ - This module asserts that given expressions are true with an optional custom message.
version_added: "1.5"
options:
that:
@@ -31,6 +35,10 @@
- "A string expression of the same form that can be passed to the 'when' statement"
- "Alternatively, a list of string expressions"
required: true
+ msg:
+ description:
+ - "The customized message used for a failing assertion"
+ required: false
author:
- "Ansible Core Team"
- "Michael DeHaan"
@@ -43,4 +51,10 @@
that:
- "'foo' in some_command_result.stdout"
- "number_of_the_counting == 3"
+
+- assert:
+ that:
+ - "my_param <= 100"
+ - "my_param >= 0"
+ msg: "'my_param' must be between 0 and 100"
'''
diff --git a/utilities/logic/async_status.py b/utilities/logic/async_status.py
index 0d434d46f87..7093cd32db7 100644
--- a/utilities/logic/async_status.py
+++ b/utilities/logic/async_status.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: async_status
@@ -50,6 +54,7 @@
import datetime
import traceback
+from ansible.module_utils.six import iteritems
def main():
@@ -78,7 +83,7 @@ def main():
data = None
try:
- data = file(log_path).read()
+ data = open(log_path).read()
data = json.loads(data)
except Exception:
if not data:
@@ -95,10 +100,12 @@ def main():
data['finished'] = 0
# Fix error: TypeError: exit_json() keywords must be strings
- data = dict([(str(k), v) for k, v in data.iteritems()])
+ data = dict([(str(k), v) for k, v in iteritems(data)])
module.exit_json(**data)
# import module snippets
from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/utilities/logic/async_wrapper.py b/utilities/logic/async_wrapper.py
index 3a9b44d4cd0..ae23d19a4a4 100644
--- a/utilities/logic/async_wrapper.py
+++ b/utilities/logic/async_wrapper.py
@@ -24,6 +24,7 @@
except ImportError:
import simplejson as json
import shlex
+import shutil
import os
import subprocess
import sys
@@ -32,6 +33,8 @@
import time
import syslog
+PY3 = sys.version_info[0] == 3
+
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
@@ -49,8 +52,7 @@ def daemonize_self():
e = sys.exc_info()[1]
sys.exit("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
- # decouple from parent environment
- os.chdir("/")
+ # decouple from parent environment (does not chdir / to keep the directory context the same as for non async tasks)
os.setsid()
os.umask(int('022', 8))
@@ -64,11 +66,67 @@ def daemonize_self():
e = sys.exc_info()[1]
sys.exit("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
- dev_null = file('/dev/null','rw')
+ dev_null = open('/dev/null', 'w')
os.dup2(dev_null.fileno(), sys.stdin.fileno())
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
+# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
+# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function.
+def _filter_non_json_lines(data):
+ '''
+ Used to filter unrelated output around module JSON output, like messages from
+ tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
+
+ Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
+ trailing lines after matching close character (working from the bottom of output).
+ '''
+ warnings = []
+
+ # Filter initial junk
+ lines = data.splitlines()
+
+ for start, line in enumerate(lines):
+ line = line.strip()
+ if line.startswith(u'{'):
+ endchar = u'}'
+ break
+ elif line.startswith(u'['):
+ endchar = u']'
+ break
+ else:
+ raise ValueError('No start of json char found')
+
+ # Filter trailing junk
+ lines = lines[start:]
+
+ for reverse_end_offset, line in enumerate(reversed(lines)):
+ if line.strip().endswith(endchar):
+ break
+ else:
+ raise ValueError('No end of json char found')
+
+ if reverse_end_offset > 0:
+ # Trailing junk is uncommon and can point to things the user might
+ # want to change. So print a warning if we find any
+ trailing_junk = lines[len(lines) - reverse_end_offset:]
+ warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
+
+ lines = lines[:(len(lines) - reverse_end_offset)]
+
+ return ('\n'.join(lines), warnings)
+
+
+def _get_interpreter(module_path):
+ module_fd = open(module_path, 'rb')
+ try:
+ head = module_fd.read(1024)
+ if head[0:2] != '#!':
+ return None
+ return head[2:head.index('\n')].strip().split(' ')
+ finally:
+ module_fd.close()
+
def _run_module(wrapped_cmd, jid, job_path):
@@ -81,11 +139,33 @@ def _run_module(wrapped_cmd, jid, job_path):
result = {}
outdata = ''
+ filtered_outdata = ''
+ stderr = ''
try:
cmd = shlex.split(wrapped_cmd)
+ # call the module interpreter directly (for non-binary modules)
+ # this permits use of a script for an interpreter on non-Linux platforms
+ interpreter = _get_interpreter(cmd[0])
+ if interpreter:
+ cmd = interpreter + cmd
script = subprocess.Popen(cmd, shell=False, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(outdata, stderr) = script.communicate()
- result = json.loads(outdata)
+ if PY3:
+ outdata = outdata.decode('utf-8', 'surrogateescape')
+ stderr = stderr.decode('utf-8', 'surrogateescape')
+
+ (filtered_outdata, json_warnings) = _filter_non_json_lines(outdata)
+
+ result = json.loads(filtered_outdata)
+
+ if json_warnings:
+ # merge JSON junk warnings with any existing module warnings
+ module_warnings = result.get('warnings', [])
+ if not isinstance(module_warnings, list):
+ module_warnings = [module_warnings]
+ module_warnings.extend(json_warnings)
+ result['warnings'] = module_warnings
+
if stderr:
result['stderr'] = stderr
jobfile.write(json.dumps(result))
@@ -96,15 +176,18 @@ def _run_module(wrapped_cmd, jid, job_path):
"failed": 1,
"cmd" : wrapped_cmd,
"msg": str(e),
+ "outdata": outdata, # temporary notice only
+ "stderr": stderr
}
result['ansible_job_id'] = jid
jobfile.write(json.dumps(result))
- except:
+ except (ValueError, Exception):
result = {
"failed" : 1,
"cmd" : wrapped_cmd,
"data" : outdata, # temporary notice only
+ "stderr": stderr,
"msg" : traceback.format_exc()
}
result['ansible_job_id'] = jid
@@ -119,18 +202,26 @@ def _run_module(wrapped_cmd, jid, job_path):
####################
if __name__ == '__main__':
- if len(sys.argv) < 3:
+ if len(sys.argv) < 5:
print(json.dumps({
"failed" : True,
- "msg" : "usage: async_wrapper . Humans, do not call directly!"
+ "msg" : "usage: async_wrapper [-preserve_tmp] "
+ "Humans, do not call directly!"
}))
sys.exit(1)
jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
- if len(sys.argv) >= 5:
- argsfile = sys.argv[4]
+ argsfile = sys.argv[4]
+ if '-tmp-' not in os.path.dirname(wrapped_module):
+ preserve_tmp = True
+ elif len(sys.argv) > 5:
+ preserve_tmp = sys.argv[5] == '-preserve_tmp'
+ else:
+ preserve_tmp = False
+ # consider underscore as no argsfile so we can support passing of additional positional parameters
+ if argsfile != '_':
cmd = "%s %s" % (wrapped_module, argsfile)
else:
cmd = wrapped_module
@@ -156,12 +247,13 @@ def _run_module(wrapped_cmd, jid, job_path):
if pid:
# Notify the overlord that the async process started
- # we need to not return immmediately such that the launched command has an attempt
+ # we need to not return immediately such that the launched command has an attempt
# to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
# this probably could be done with some IPC later. Modules should always read
# the argsfile at the very first start of their execution anyway
notice("Return async_wrapper task started.")
- print(json.dumps({ "started" : 1, "finished" : 0, "ansible_job_id" : jid, "results_file" : job_path }))
+ print(json.dumps({ "started" : 1, "finished" : 0, "ansible_job_id" : jid, "results_file" : job_path,
+ "_suppress_tmpdir_delete": not preserve_tmp}))
sys.stdout.flush()
time.sleep(1)
sys.exit(0)
@@ -193,8 +285,12 @@ def _run_module(wrapped_cmd, jid, job_path):
os.killpg(sub_pid, signal.SIGKILL)
notice("Sent kill to group %s"%sub_pid)
time.sleep(1)
+ if not preserve_tmp:
+ shutil.rmtree(os.path.dirname(wrapped_module), True)
sys.exit(0)
notice("Done in kid B.")
+ if not preserve_tmp:
+ shutil.rmtree(os.path.dirname(wrapped_module), True)
sys.exit(0)
else:
# the child process runs the actual module
diff --git a/utilities/logic/debug.py b/utilities/logic/debug.py
index 89d9254a08e..8e8f16ca554 100644
--- a/utilities/logic/debug.py
+++ b/utilities/logic/debug.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: debug
@@ -44,23 +48,29 @@
required: False
default: 0
version_added: "2.1"
-author:
+author:
- "Dag Wieers (@dagwieers)"
- "Michael DeHaan"
'''
EXAMPLES = '''
# Example that prints the loopback address and gateway for each host
-- debug: msg="System {{ inventory_hostname }} has uuid {{ ansible_product_uuid }}"
+- debug:
+ msg: "System {{ inventory_hostname }} has uuid {{ ansible_product_uuid }}"
-- debug: msg="System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}"
+- debug:
+ msg: "System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}"
when: ansible_default_ipv4.gateway is defined
- shell: /usr/bin/uptime
register: result
-- debug: var=result verbosity=2
+- debug:
+ var: result
+ verbosity: 2
- name: Display all variables/facts known for a host
- debug: var=hostvars[inventory_hostname] verbosity=4
+ debug:
+ var: hostvars[inventory_hostname]
+ verbosity: 4
'''
diff --git a/utilities/logic/fail.py b/utilities/logic/fail.py
index 75a7c81d1cf..544758ad100 100644
--- a/utilities/logic/fail.py
+++ b/utilities/logic/fail.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: fail
@@ -39,6 +43,7 @@
EXAMPLES = '''
# Example playbook using fail and when together
-- fail: msg="The system may not be provisioned according to the CMDB status."
+- fail:
+ msg: "The system may not be provisioned according to the CMDB status."
when: cmdb_status != "to-be-staged"
'''
diff --git a/utilities/logic/include.py b/utilities/logic/include.py
new file mode 100644
index 00000000000..3de1e198a2a
--- /dev/null
+++ b/utilities/logic/include.py
@@ -0,0 +1,71 @@
+#!/usr/bin/python
+# -*- mode: python -*-
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+author:
+ - "Ansible Core Team (@ansible)"
+module: include
+short_description: include a play or task list.
+description:
+ - Includes a file with a list of plays or tasks to be executed in the current playbook.
+ - Files with a list of plays can only be included at the top level, lists of tasks can only be included where tasks normally run (in play).
+ - Before 2.0 all includes were 'static', executed at play compile time.
+ - Static includes are not subject to most directives, for example, loops or conditionals, they are applied instead to each inherited task.
+ - Since 2.0 task includes are dynamic and behave more like real tasks. This means they can be looped, skipped and use variables from any source.
+ Ansible tries to auto detect this, use the `static` directive (new in 2.1) to bypass autodetection.
+version_added: "0.6"
+options:
+ free-form:
+ description:
+ - This module allows you to specify the name of the file directly w/o any other options.
+notes:
+ - This is really not a module, though it appears as such, this is a feature of the Ansible Engine, as such it cannot be overridden the same way a module can.
+'''
+
+EXAMPLES = """
+# include a play after another play
+- hosts: localhost
+ tasks:
+ - debug:
+ msg: "play1"
+
+- include: otherplays.yml
+
+
+# include task list in play
+- hosts: all
+ tasks:
+ - debug:
+ msg: task1
+
+ - include: stuff.yml
+
+ - debug:
+ msg: task10
+
+# dynamic include task list in play
+- hosts: all
+ tasks:
+ - debug:
+ msg: task1
+
+ - include: "{{ hostvar }}.yml"
+ static: no
+ when: hostvar is defined
+"""
+
+RETURN = """
+# this module does not return anything except plays or tasks to execute
+"""
diff --git a/utilities/logic/include_role.py b/utilities/logic/include_role.py
index 784d3a5af4f..843a873b8ae 100644
--- a/utilities/logic/include_role.py
+++ b/utilities/logic/include_role.py
@@ -8,6 +8,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
author:
@@ -37,23 +41,34 @@
- "File to load from a Role's defaults/ directory."
required: False
default: 'main'
+ static:
+ description:
+ - Gives Ansible a hint if this is a 'static' include or not. If static it implies that it won't need templating nor loops nor conditionals and will show included tasks in the --list options.
+ required: False
+ default: None
+ allow_duplicates:
+ description:
+ - Overrides the role's metadata setting to allow using a role more than once with the same parameters.
+ required: False
+ default: True
+ private:
+ description:
+ - If True the variables from defaults/ and vars/ in a role will not be made available to the rest of the play.
+ default: None
notes:
- THIS IS EARLY PREVIEW, THINGS MAY CHANGE
- - Only basic roles have been tested for now, some things might not work as expected.
- Handlers are made available to the whole play.
- - Currently role variables are not pushed up to the play.
- simple dependencies seem to work fine.
- - Role search paths work (implicit vars/ templates/ files/ etc)
- - loops don't work.
- "Things not tested (yet): plugin overrides, nesting includes, used as handler, other stuff I did not think of when I wrote this."
'''
EXAMPLES = """
-- include_role: name=myrole
+- include_role:
+ name: myrole
- name: Run tasks/other.yml instead of 'main'
include_role:
- role: myrole
+ name: myrole
tasks_from: other
- name: Pass variables to role
@@ -62,6 +77,19 @@
vars:
rolevar1: 'value from task'
+- name: Use role in loop
+ include_role:
+ name: myrole
+ with_items:
+ - "{{roleinput1}}"
+ - "{{roleinput2}}"
+ loop_control:
+ loop_var: roleinputvar
+
+- name: conditional role
+ include_role:
+ name: myrole
+ when: not idontwanttorun
"""
RETURN = """
diff --git a/utilities/logic/include_vars.py b/utilities/logic/include_vars.py
index a0b3280d807..8e7f4aa1bda 100644
--- a/utilities/logic/include_vars.py
+++ b/utilities/logic/include_vars.py
@@ -8,30 +8,54 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
-author: "Benno Joy (@bennojoy)"
+author: "Allen Sanabria (@linuxdynasty)"
module: include_vars
short_description: Load variables from files, dynamically within a task.
description:
- - Loads variables from a YAML/JSON file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from.
+ - Loads variables from YAML/JSON files dynamically from within a file or
+ from a directory recursively during task runtime. If loading a directory, the files are sorted alphabetically before being loaded.
+version_added: "1.4"
options:
file:
version_added: "2.2"
description:
- The file name from which variables should be loaded.
- If the path is relative, it will look for the file in vars/ subdirectory of a role or relative to playbook.
+ dir:
+ version_added: "2.2"
+ description:
+ - The directory name from which the variables should be loaded.
+ - If the path is relative, it will look for the file in vars/ subdirectory of a role or relative to playbook.
+ default: null
name:
version_added: "2.2"
description:
- The name of a variable into which assign the included vars, if omitted (null) they will be made top level vars.
default: null
+ depth:
+ version_added: "2.2"
+ description:
+ - By default, this module will recursively go through each sub directory and load up the variables. By explicitly setting the depth, this module will only go as deep as the depth.
+ default: 0
+ files_matching:
+ version_added: "2.2"
+ description:
+ - Limit the variables that are loaded within any directory to this regular expression.
+ default: null
+ ignore_files:
+ version_added: "2.2"
+ description:
+ - List of file names to ignore. The defaults can not be overridden, but can be extended.
+ default: null
free-form:
description:
- This module allows you to specify the 'file' option directly w/o any other options.
-notes:
- - The file is always required either as the explicit option or using the free-form.
-version_added: "1.4"
'''
EXAMPLES = """
@@ -41,17 +65,42 @@
name: stuff
# Conditionally decide to load in variables into 'plans' when x is 0, otherwise do not. (2.2)
-- include_vars: file=contingency_plan.yml name=plans
+- include_vars:
+ file: contingency_plan.yml
+ name: plans
when: x == 0
# Load a variable file based on the OS type, or a default if not found.
- include_vars: "{{ item }}"
with_first_found:
- - "{{ ansible_distribution }}.yml"
- - "{{ ansible_os_family }}.yml"
- - "default.yml"
+ - "{{ ansible_distribution }}.yml"
+ - "{{ ansible_os_family }}.yml"
+ - "default.yml"
# bare include (free-form)
- include_vars: myvars.yml
+# Include all yml files in vars/all and all nested directories
+- include_vars:
+ dir: 'vars/all'
+
+# Include all yml files in vars/all and all nested directories and save the output in test.
+- include_vars:
+ dir: 'vars/all'
+ name: test
+
+# Include all yml files in vars/services
+- include_vars:
+ dir: 'vars/services'
+ depth: 1
+
+# Include only bastion.yml files
+- include_vars:
+ dir: 'vars'
+ files_matching: 'bastion.yml'
+
+# Include all yml files except bastion.yml
+- include_vars:
+ dir: 'vars'
+ ignore_files: 'bastion.yml'
"""
diff --git a/utilities/logic/pause.py b/utilities/logic/pause.py
index 75d2db1a73a..0fed099b700 100644
--- a/utilities/logic/pause.py
+++ b/utilities/logic/pause.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: pause
@@ -47,11 +51,13 @@
EXAMPLES = '''
# Pause for 5 minutes to build app cache.
-- pause: minutes=5
+- pause:
+ minutes: 5
# Pause until you can verify updates to an application were successful.
- pause:
# A helpful reminder of what to look out for post-update.
-- pause: prompt="Make sure org.foo.FooOverload exception is not present"
+- pause:
+ prompt: "Make sure org.foo.FooOverload exception is not present"
'''
diff --git a/utilities/logic/set_fact.py b/utilities/logic/set_fact.py
index edd41119ada..c106ef74e63 100644
--- a/utilities/logic/set_fact.py
+++ b/utilities/logic/set_fact.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
author: "Dag Wieers (@dagwieers)"
@@ -42,7 +46,9 @@
EXAMPLES = '''
# Example setting host facts using key=value pairs, note that this always creates strings or booleans
-- set_fact: one_fact="something" other_fact="{{ local_var }}"
+- set_fact:
+ one_fact: "something"
+ other_fact: "{{ local_var }}"
# Example setting host facts using complex arguments
- set_fact:
diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py
index d1cf928af64..55f88fc8c84 100644
--- a/utilities/logic/wait_for.py
+++ b/utilities/logic/wait_for.py
@@ -27,6 +27,8 @@
import sys
import time
+from ansible.module_utils._text import to_native
+
HAS_PSUTIL = False
try:
import psutil
@@ -35,6 +37,10 @@
except ImportError:
pass
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: wait_for
@@ -78,28 +84,39 @@
description:
- port number to poll
required: false
+ default: null
state:
description:
- either C(present), C(started), or C(stopped), C(absent), or C(drained)
- When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections
- When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing, C(absent) will check that file is absent or removed
choices: [ "present", "started", "stopped", "absent", "drained" ]
+ required: False
default: "started"
path:
version_added: "1.4"
required: false
+ default: null
description:
- path to a file on the filesytem that must exist before continuing
search_regex:
version_added: "1.4"
required: false
+ default: null
description:
- Can be used to match a string in either a file or a socket connection. Defaults to a multiline regex.
exclude_hosts:
version_added: "1.8"
required: false
+ default: null
description:
- list of hosts or IPs to ignore when looking for active TCP connections for C(drained) state
+ sleep:
+ version_added: "2.3"
+ required: false
+ default: 1
+ description:
+ - Number of seconds to sleep between checks, before 2.3 this was hardcoded to 1 second.
notes:
- The ability to use search_regex with a port connection was added in 1.7.
requirements: []
@@ -112,30 +129,50 @@
EXAMPLES = '''
# wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds
-- wait_for: port=8000 delay=10
+- wait_for:
+ port: 8000
+ delay: 10
# wait 300 seconds for port 8000 of any IP to close active connections, don't start checking for 10 seconds
-- wait_for: host=0.0.0.0 port=8000 delay=10 state=drained
+- wait_for:
+ host: 0.0.0.0
+ port: 8000
+ delay: 10
+ state: drained
# wait 300 seconds for port 8000 of any IP to close active connections, ignoring connections for specified hosts
-- wait_for: host=0.0.0.0 port=8000 state=drained exclude_hosts=10.2.1.2,10.2.1.3
+- wait_for:
+ host: 0.0.0.0
+ port: 8000
+ state: drained
+ exclude_hosts: 10.2.1.2,10.2.1.3
# wait until the file /tmp/foo is present before continuing
-- wait_for: path=/tmp/foo
+- wait_for:
+ path: /tmp/foo
# wait until the string "completed" is in the file /tmp/foo before continuing
-- wait_for: path=/tmp/foo search_regex=completed
+- wait_for:
+ path: /tmp/foo
+ search_regex: completed
# wait until the lock file is removed
-- wait_for: path=/var/lock/file.lock state=absent
+- wait_for:
+ path: /var/lock/file.lock
+ state: absent
# wait until the process is finished and pid was destroyed
-- wait_for: path=/proc/3466/status state=absent
+- wait_for:
+ path: /proc/3466/status
+ state: absent
# wait 300 seconds for port 22 to become open and contain "OpenSSH", don't assume the inventory_hostname is resolvable
# and don't start checking for 10 seconds
-- local_action: wait_for port=22 host="{{ ansible_ssh_host | default(inventory_hostname) }}" search_regex=OpenSSH delay=10
-
+- local_action: wait_for
+ port: 22
+ host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+ search_regex: OpenSSH
+ delay: 10
'''
class TCPConnectionInfo(object):
@@ -362,7 +399,8 @@ def main():
path=dict(default=None, type='path'),
search_regex=dict(default=None),
state=dict(default='started', choices=['started', 'stopped', 'present', 'absent', 'drained']),
- exclude_hosts=dict(default=None, type='list')
+ exclude_hosts=dict(default=None, type='list'),
+ sleep=dict(default=1, type='int')
),
)
@@ -407,8 +445,6 @@ def main():
try:
f = open(path)
f.close()
- time.sleep(1)
- pass
except IOError:
break
elif port:
@@ -416,11 +452,10 @@ def main():
s = _create_connection(host, port, connect_timeout)
s.shutdown(socket.SHUT_RDWR)
s.close()
- time.sleep(1)
except:
break
- else:
- time.sleep(1)
+ # Conditions not yet met, wait and try again
+ time.sleep(params['sleep'])
else:
elapsed = datetime.datetime.now() - start
if port:
@@ -480,7 +515,7 @@ def main():
if not response:
# Server shutdown
break
- data += response
+ data += to_native(response, errors='surrogate_or_strict')
if re.search(compiled_search_re, data):
matched = True
break
@@ -498,7 +533,7 @@ def main():
break
# Conditions not yet met, wait and try again
- time.sleep(1)
+ time.sleep(params['sleep'])
else: # while-else
# Timeout expired
@@ -524,7 +559,8 @@ def main():
break
except IOError:
pass
- time.sleep(1)
+ # Conditions not yet met, wait and try again
+ time.sleep(params['sleep'])
else:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds)
diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py
index 16a9998845f..34d736d4d7c 100644
--- a/web_infrastructure/apache2_module.py
+++ b/web_infrastructure/apache2_module.py
@@ -16,6 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: apache2_module
@@ -47,10 +51,14 @@
EXAMPLES = '''
# enables the Apache2 module "wsgi"
-- apache2_module: state=present name=wsgi
+- apache2_module:
+ state: present
+ name: wsgi
# disables the Apache2 module "wsgi"
-- apache2_module: state=absent name=wsgi
+- apache2_module:
+ state: absent
+ name: wsgi
'''
import re
@@ -80,6 +88,12 @@ def _module_is_enabled(module):
result, stdout, stderr = module.run_command("%s -M" % control_binary)
+ """
+ Work around for Ubuntu Xenial listing php7_module as php7.0
+ """
+ if name == "php7.0":
+ name = "php7"
+
if result != 0:
module.fail_json(msg="Error executing %s: %s" % (control_binary, stderr))
diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py
index 3ce815dc582..efd32a33a18 100644
--- a/web_infrastructure/django_manage.py
+++ b/web_infrastructure/django_manage.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: django_manage
@@ -99,24 +103,34 @@
EXAMPLES = """
# Run cleanup on the application installed in 'django_dir'.
-- django_manage: command=cleanup app_path={{ django_dir }}
+- django_manage:
+ command: cleanup
+ app_path: "{{ django_dir }}"
# Load the initial_data fixture into the application
-- django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }}
+- django_manage:
+ command: loaddata
+ app_path: "{{ django_dir }}"
+ fixtures: "{{ initial_data }}"
# Run syncdb on the application
-- django_manage: >
- command=syncdb
- app_path={{ django_dir }}
- settings={{ settings_app_name }}
- pythonpath={{ settings_dir }}
- virtualenv={{ virtualenv_dir }}
+- django_manage:
+ command: syncdb
+ app_path: "{{ django_dir }}"
+ settings: "{{ settings_app_name }}"
+ pythonpath: "{{ settings_dir }}"
+ virtualenv: "{{ virtualenv_dir }}"
# Run the SmokeTest test case from the main app. Useful for testing deploys.
-- django_manage: command=test app_path={{ django_dir }} apps=main.SmokeTest
+- django_manage:
+ command: test
+ app_path: "{{ django_dir }}"
+ apps: main.SmokeTest
# Create an initial superuser.
-- django_manage: command="createsuperuser --noinput --username=admin --email=admin@example.com" app_path={{ django_dir }}
+- django_manage:
+ command: "createsuperuser --noinput --username=admin --email=admin@example.com"
+ app_path: "{{ django_dir }}"
"""
@@ -284,4 +298,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py
index 3ad9de6e60f..0c5d8bea9d6 100644
--- a/web_infrastructure/htpasswd.py
+++ b/web_infrastructure/htpasswd.py
@@ -18,6 +18,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
#
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = """
module: htpasswd
version_added: "1.3"
@@ -74,11 +78,26 @@
EXAMPLES = """
# Add a user to a password file and ensure permissions are set
-- htpasswd: path=/etc/nginx/passwdfile name=janedoe password=9s36?;fyNp owner=root group=www-data mode=0640
+- htpasswd:
+ path: /etc/nginx/passwdfile
+ name: janedoe
+ password: '9s36?;fyNp'
+ owner: root
+ group: www-data
+ mode: 0640
+
# Remove a user from a password file
-- htpasswd: path=/etc/apache2/passwdfile name=foobar state=absent
+- htpasswd:
+ path: /etc/apache2/passwdfile
+ name: foobar
+ state: absent
+
# Add a user to a password file suitable for use by libpam-pwdfile
-- htpasswd: path=/etc/mail/passwords name=alex password=oedu2eGh crypt_scheme=md5_crypt
+- htpasswd:
+ path: /etc/mail/passwords
+ name: alex
+ password: oedu2eGh
+ crypt_scheme: md5_crypt
"""
@@ -257,7 +276,8 @@ def main():
# import module snippets
-from ansible.module_utils.basic import *
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py
index 6ac5bc7de67..84c8ece749f 100644
--- a/web_infrastructure/supervisorctl.py
+++ b/web_infrastructure/supervisorctl.py
@@ -19,6 +19,11 @@
# along with Ansible. If not, see .
#
import os
+from ansible.module_utils.basic import AnsibleModule, is_executable
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
DOCUMENTATION = '''
---
@@ -82,16 +87,28 @@
EXAMPLES = '''
# Manage the state of program to be in 'started' state.
-- supervisorctl: name=my_app state=started
+- supervisorctl:
+ name: my_app
+ state: started
# Manage the state of program group to be in 'started' state.
-- supervisorctl: name='my_apps:' state=started
+- supervisorctl:
+ name: 'my_apps:'
+ state: started
# Restart my_app, reading supervisorctl configuration from a specified file.
-- supervisorctl: name=my_app state=restarted config=/var/opt/my_project/supervisord.conf
+- supervisorctl:
+ name: my_app
+ state: restarted
+ config: /var/opt/my_project/supervisord.conf
# Restart my_app, connecting to supervisord with credentials and server URL.
-- supervisorctl: name=my_app state=restarted username=test password=testpass server_url=http://localhost:9001
+- supervisorctl:
+ name: my_app
+ state: restarted
+ username: test
+ password: testpass
+ server_url: http://localhost:9001
'''
@@ -101,7 +118,7 @@ def main():
config=dict(required=False, type='path'),
server_url=dict(required=False),
username=dict(required=False),
- password=dict(required=False),
+ password=dict(required=False, no_log=True),
supervisorctl_path=dict(required=False, type='path'),
state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent'])
)
@@ -239,8 +256,5 @@ def take_action_on_processes(processes, status_filter, action, expected_result):
module.fail_json(name=name, msg="ERROR (no such process)")
take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped')
-# import module snippets
-from ansible.module_utils.basic import *
-# is_executable from basic
if __name__ == '__main__':
main()
diff --git a/windows/async_status.ps1 b/windows/async_status.ps1
new file mode 100644
index 00000000000..efde748fb97
--- /dev/null
+++ b/windows/async_status.ps1
@@ -0,0 +1,69 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$results = @{changed=$false}
+
+$parsed_args = Parse-Args $args
+$jid = Get-AnsibleParam $parsed_args "jid" -failifempty $true -resultobj $results
+$mode = Get-AnsibleParam $parsed_args "mode" -Default "status" -ValidateSet "status","cleanup"
+
+# setup logging directory
+$log_path = [System.IO.Path]::Combine($env:LOCALAPPDATA, ".ansible_async", $jid)
+
+If(-not $(Test-Path $log_path))
+{
+ Fail-Json @{ansible_job_id=$jid; started=1; finished=1} "could not find job"
+}
+
+If($mode -eq "cleanup") {
+ Remove-Item $log_path -Recurse
+ Exit-Json @{ansible_job_id=$jid; erased=$log_path}
+}
+
+# NOT in cleanup mode, assume regular status mode
+# no remote kill mode currently exists, but probably should
+# consider log_path + ".pid" file and also unlink that above
+
+$data = $null
+Try {
+ $data_raw = Get-Content $log_path
+
+ # TODO: move this into module_utils/powershell.ps1?
+ $jss = New-Object System.Web.Script.Serialization.JavaScriptSerializer
+ $data = $jss.DeserializeObject($data_raw)
+}
+Catch {
+ If(-not $data_raw) {
+ # file not written yet? That means it is running
+ Exit-Json @{results_file=$log_path; ansible_job_id=$jid; started=1; finished=0}
+ }
+ Else {
+ Fail-Json @{ansible_job_id=$jid; results_file=$log_path; started=1; finished=1} "Could not parse job output: $data"
+ }
+}
+
+If (-not $data.ContainsKey("started")) {
+ $data['finished'] = 1
+ $data['ansible_job_id'] = $jid
+}
+ElseIf (-not $data.ContainsKey("finished")) {
+ $data['finished'] = 0
+}
+
+Exit-Json $data
diff --git a/windows/async_wrapper.ps1 b/windows/async_wrapper.ps1
new file mode 100644
index 00000000000..a79a6d6bb13
--- /dev/null
+++ b/windows/async_wrapper.ps1
@@ -0,0 +1,447 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright (c)2016, Matt Davis
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+Param(
+ [string]$jid,
+ [int]$max_exec_time_sec,
+ [string]$module_path,
+ [string]$argfile_path,
+ [switch]$preserve_tmp
+)
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+Set-StrictMode -Version 2
+$ErrorActionPreference = "Stop"
+
+Function Start-Watchdog {
+ Param(
+ [string]$module_tempdir,
+ [string]$module_path,
+ [int]$max_exec_time_sec,
+ [string]$resultfile_path,
+ [string]$argfile_path,
+ [switch]$preserve_tmp,
+ [switch]$start_suspended
+ )
+
+# BEGIN Ansible.Async native type definition
+ $native_process_util = @"
+ using Microsoft.Win32.SafeHandles;
+ using System;
+ using System.ComponentModel;
+ using System.Diagnostics;
+ using System.IO;
+ using System.Linq;
+ using System.Runtime.InteropServices;
+ using System.Text;
+ using System.Threading;
+
+ namespace Ansible.Async {
+
+ public static class NativeProcessUtil
+ {
+ [DllImport("kernel32.dll", SetLastError=true, CharSet=CharSet.Unicode)]
+ public static extern bool CreateProcess(
+ string lpApplicationName,
+ string lpCommandLine,
+ IntPtr lpProcessAttributes,
+ IntPtr lpThreadAttributes,
+ bool bInheritHandles,
+ uint dwCreationFlags,
+ IntPtr lpEnvironment,
+ string lpCurrentDirectory,
+ [In] ref STARTUPINFO lpStartupInfo,
+ out PROCESS_INFORMATION lpProcessInformation);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet=CharSet.Unicode)]
+ public static extern uint SearchPath (
+ string lpPath,
+ string lpFileName,
+ string lpExtension,
+ int nBufferLength,
+ [MarshalAs (UnmanagedType.LPTStr)]
+ StringBuilder lpBuffer,
+ out IntPtr lpFilePart);
+
+ public static string SearchPath(string findThis)
+ {
+ StringBuilder sbOut = new StringBuilder(1024);
+ IntPtr filePartOut;
+
+ if(SearchPath(null, findThis, null, sbOut.Capacity, sbOut, out filePartOut) == 0)
+ throw new FileNotFoundException("Couldn't locate " + findThis + " on path");
+
+ return sbOut.ToString();
+ }
+
+ [DllImport("kernel32.dll", SetLastError=true)]
+ static extern SafeFileHandle OpenThread(
+ ThreadAccessRights dwDesiredAccess,
+ bool bInheritHandle,
+ int dwThreadId);
+
+ [DllImport("kernel32.dll", SetLastError=true)]
+ static extern int ResumeThread(SafeHandle hThread);
+
+ public static void ResumeThreadById(int threadId)
+ {
+ var threadHandle = OpenThread(ThreadAccessRights.SUSPEND_RESUME, false, threadId);
+ if(threadHandle.IsInvalid)
+ throw new Exception(String.Format("Thread ID {0} is invalid ({1})", threadId, new Win32Exception(Marshal.GetLastWin32Error()).Message));
+
+ try
+ {
+ if(ResumeThread(threadHandle) == -1)
+ throw new Exception(String.Format("Thread ID {0} cannot be resumed ({1})", threadId, new Win32Exception(Marshal.GetLastWin32Error()).Message));
+ }
+ finally
+ {
+ threadHandle.Dispose();
+ }
+ }
+
+ public static void ResumeProcessById(int pid)
+ {
+ var proc = Process.GetProcessById(pid);
+
+ // wait for at least one suspended thread in the process (this handles possible slow startup race where primary thread of created-suspended process has not yet become runnable)
+ var retryCount = 0;
+ while(!proc.Threads.OfType().Any(t=>t.ThreadState == System.Diagnostics.ThreadState.Wait && t.WaitReason == ThreadWaitReason.Suspended))
+ {
+ proc.Refresh();
+ Thread.Sleep(50);
+ if (retryCount > 100)
+ throw new InvalidOperationException(String.Format("No threads were suspended in target PID {0} after 5s", pid));
+ }
+
+ foreach(var thread in proc.Threads.OfType().Where(t => t.ThreadState == System.Diagnostics.ThreadState.Wait && t.WaitReason == ThreadWaitReason.Suspended))
+ ResumeThreadById(thread.Id);
+ }
+ }
+
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct STARTUPINFO
+ {
+ public Int32 cb;
+ public string lpReserved;
+ public string lpDesktop;
+ public string lpTitle;
+ public Int32 dwX;
+ public Int32 dwY;
+ public Int32 dwXSize;
+ public Int32 dwYSize;
+ public Int32 dwXCountChars;
+ public Int32 dwYCountChars;
+ public Int32 dwFillAttribute;
+ public Int32 dwFlags;
+ public Int16 wShowWindow;
+ public Int16 cbReserved2;
+ public IntPtr lpReserved2;
+ public IntPtr hStdInput;
+ public IntPtr hStdOutput;
+ public IntPtr hStdError;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct PROCESS_INFORMATION
+ {
+ public IntPtr hProcess;
+ public IntPtr hThread;
+ public int dwProcessId;
+ public int dwThreadId;
+ }
+
+ [Flags]
+ enum ThreadAccessRights : uint
+ {
+ SUSPEND_RESUME = 0x0002
+ }
+ }
+"@ # END Ansible.Async native type definition
+
+ Add-Type -TypeDefinition $native_process_util
+
+ $watchdog_script = {
+ Set-StrictMode -Version 2
+ $ErrorActionPreference = "Stop"
+
+ Function Log {
+ Param(
+ [string]$msg
+ )
+
+ If(Get-Variable -Name log_path -ErrorAction SilentlyContinue) {
+ Add-Content $log_path $msg
+ }
+ }
+
+ Add-Type -AssemblyName System.Web.Extensions
+
+ # -EncodedCommand won't allow us to pass args, so they have to be templated into the script
+ $jsonargs = @"
+ <>
+"@
+ Function Deserialize-Json {
+ Param(
+ [Parameter(ValueFromPipeline=$true)]
+ [string]$json
+ )
+
+ # FUTURE: move this into module_utils/powershell.ps1 and use for everything (sidestep PSCustomObject issues)
+ # FUTURE: won't work w/ Nano Server/.NET Core- fallback to DataContractJsonSerializer (which can't handle dicts on .NET 4.0)
+
+ Log "Deserializing:`n$json"
+
+ $jss = New-Object System.Web.Script.Serialization.JavaScriptSerializer
+ return $jss.DeserializeObject($json)
+ }
+
+ Function Write-Result {
+ [hashtable]$result,
+ [string]$resultfile_path
+
+ $result | ConvertTo-Json | Set-Content -Path $resultfile_path
+ }
+
+ Function Exec-Module {
+ Param(
+ [string]$module_tempdir,
+ [string]$module_path,
+ [int]$max_exec_time_sec,
+ [string]$resultfile_path,
+ [string]$argfile_path,
+ [switch]$preserve_tmp
+ )
+
+ Log "in watchdog exec"
+
+ Try
+ {
+ Log "deserializing existing resultfile args"
+ # read in existing resultsfile to merge w/ module output (it should be written by the time we're unsuspended and running)
+ $result = Get-Content $resultfile_path -Raw | Deserialize-Json
+
+ Log "deserialized result is $($result | Out-String)"
+
+ Log "creating runspace"
+
+ $rs = [runspacefactory]::CreateRunspace()
+ $rs.Open()
+ $rs.SessionStateProxy.Path.SetLocation($module_tempdir) | Out-Null
+
+ Log "creating Powershell object"
+
+ $job = [powershell]::Create()
+ $job.Runspace = $rs
+
+ Log "adding scripts"
+
+ if($module_path.EndsWith(".ps1")) {
+ $job.AddScript($module_path) | Out-Null
+ }
+ else {
+ $job.AddCommand($module_path) | Out-Null
+ $job.AddArgument($argfile_path) | Out-Null
+ }
+
+ Log "job BeginInvoke()"
+
+ $job_asyncresult = $job.BeginInvoke()
+
+ Log "waiting $max_exec_time_sec seconds for job to complete"
+
+ $signaled = $job_asyncresult.AsyncWaitHandle.WaitOne($max_exec_time_sec * 1000)
+
+ $result["finished"] = 1
+
+ If($job_asyncresult.IsCompleted) {
+ Log "job completed, calling EndInvoke()"
+
+ $job_output = $job.EndInvoke($job_asyncresult)
+ $job_error = $job.Streams.Error
+
+ Log "raw module stdout: \r\n$job_output"
+ If($job_error) {
+ Log "raw module stderr: \r\n$job_error"
+ }
+
+ # write success/output/error to result object
+
+ # TODO: cleanse leading/trailing junk
+ Try {
+ $module_result = Deserialize-Json $job_output
+ # TODO: check for conflicting keys
+ $result = $result + $module_result
+ }
+ Catch {
+ $excep = $_
+
+ $result.failed = $true
+ $result.msg = "failed to parse module output: $excep"
+ }
+
+ # TODO: determine success/fail, or always include stderr if nonempty?
+ Write-Result $result $resultfile_path
+
+ Log "wrote output to $resultfile_path"
+ }
+ Else {
+ $job.Stop()
+ # write timeout to result object
+ $result.failed = $true
+ $result.msg = "timed out waiting for module completion"
+ Write-Result $result $resultfile_path
+
+ Log "wrote timeout to $resultfile_path"
+ }
+
+ $rs.Close() | Out-Null
+ }
+ Catch {
+ $excep = $_
+
+ $result = @{failed=$true; msg="module execution failed: $($excep.ToString())`n$($excep.InvocationInfo.PositionMessage)"}
+
+ Write-Result $result $resultfile_path
+ }
+ Finally
+ {
+ If(-not $preserve_tmp -and $module_tempdir -imatch "-tmp-") {
+ Try {
+ Log "deleting tempdir, cwd is $(Get-Location)"
+ Set-Location $env:USERPROFILE
+ $res = Remove-Item $module_tempdir -recurse 2>&1
+ Log "delete output was $res"
+ }
+ Catch {
+ $excep = $_
+ Log "error deleting tempdir: $excep"
+ }
+ }
+ Else {
+ Log "skipping tempdir deletion"
+ }
+ }
+ }
+
+ Try {
+ Log "deserializing args"
+
+ # deserialize the JSON args that should've been templated in before execution
+ $ext_args = Deserialize-Json $jsonargs
+
+ Log "exec module"
+
+ Exec-Module @ext_args
+
+ Log "exec done"
+ }
+ Catch {
+ $excep = $_
+
+ Log $excep
+ }
+ }
+
+ $bp = [hashtable] $MyInvocation.BoundParameters
+ # convert switch types to bool so they'll serialize as simple bools
+ $bp["preserve_tmp"] = [bool]$bp["preserve_tmp"]
+ $bp["start_suspended"] = [bool]$bp["start_suspended"]
+
+ # serialize this function's args to JSON so we can template them verbatim into the script(block)
+ $jsonargs = $bp | ConvertTo-Json
+
+ $raw_script = $watchdog_script.ToString()
+ $raw_script = $raw_script.Replace("<>", $jsonargs)
+
+ $encoded_command = [Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($raw_script))
+
+ # FUTURE: create under new job to ensure all children die on exit?
+
+ # FUTURE: move these flags into C# enum
+ # start process suspended + breakaway so we can record the watchdog pid without worrying about a completion race
+ Set-Variable CREATE_BREAKAWAY_FROM_JOB -Value ([uint32]0x01000000) -Option Constant
+ Set-Variable CREATE_SUSPENDED -Value ([uint32]0x00000004) -Option Constant
+ Set-Variable CREATE_UNICODE_ENVIRONMENT -Value ([uint32]0x000000400) -Option Constant
+ Set-Variable CREATE_NEW_CONSOLE -Value ([uint32]0x00000010) -Option Constant
+
+ $pstartup_flags = $CREATE_BREAKAWAY_FROM_JOB -bor $CREATE_UNICODE_ENVIRONMENT -bor $CREATE_NEW_CONSOLE
+ If($start_suspended) {
+ $pstartup_flags = $pstartup_flags -bor $CREATE_SUSPENDED
+ }
+
+ # execute the dynamic watchdog as a breakway process, which will in turn exec the module
+ $si = New-Object Ansible.Async.STARTUPINFO
+ $si.cb = [System.Runtime.InteropServices.Marshal]::SizeOf([type][Ansible.Async.STARTUPINFO])
+
+ $pi = New-Object Ansible.Async.PROCESS_INFORMATION
+
+ # FUTURE: direct cmdline CreateProcess path lookup fails- this works but is sub-optimal
+ $exec_cmd = [Ansible.Async.NativeProcessUtil]::SearchPath("powershell.exe")
+ $exec_args = "`"$exec_cmd`" -NoProfile -ExecutionPolicy Bypass -EncodedCommand $encoded_command"
+
+ If(-not [Ansible.Async.NativeProcessUtil]::CreateProcess($exec_cmd, $exec_args, [IntPtr]::Zero, [IntPtr]::Zero, $false, $pstartup_flags, [IntPtr]::Zero, $env:windir, [ref]$si, [ref]$pi)) {
+ #throw New-Object System.ComponentModel.Win32Exception
+ throw "create bang $([System.Runtime.InteropServices.Marshal]::GetLastWin32Error())"
+ }
+
+ $watchdog_pid = $pi.dwProcessId
+
+ return $watchdog_pid
+}
+
+$local_jid = $jid + "." + $pid
+
+$results_path = [System.IO.Path]::Combine($env:LOCALAPPDATA, ".ansible_async", $local_jid)
+
+[System.IO.Directory]::CreateDirectory([System.IO.Path]::GetDirectoryName($results_path)) | Out-Null
+
+$watchdog_args = @{
+ module_tempdir=$([System.IO.Path]::GetDirectoryName($module_path));
+ module_path=$module_path;
+ max_exec_time_sec=$max_exec_time_sec;
+ resultfile_path=$results_path;
+ argfile_path=$argfile_path;
+ start_suspended=$true;
+}
+
+If($preserve_tmp) {
+ $watchdog_args["preserve_tmp"] = $true
+}
+
+# start watchdog/module-exec
+$watchdog_pid = Start-Watchdog @watchdog_args
+
+# populate initial results before we resume the process to avoid result race
+$result = @{
+ started=1;
+ finished=0;
+ results_file=$results_path;
+ ansible_job_id=$local_jid;
+ _suppress_tmpdir_delete=$true;
+ ansible_async_watchdog_pid=$watchdog_pid
+}
+
+$result_json = ConvertTo-Json $result
+Set-Content $results_path -Value $result_json
+
+[Ansible.Async.NativeProcessUtil]::ResumeProcessById($watchdog_pid)
+
+return $result_json
diff --git a/windows/setup.ps1 b/windows/setup.ps1
index ff7ee2e5942..fef2ade25c4 100644
--- a/windows/setup.ps1
+++ b/windows/setup.ps1
@@ -17,9 +17,7 @@
# WANT_JSON
# POWERSHELL_COMMON
-# enabled $params (David O'Brien, 06/08/2015)
-$params = Parse-Args $args;
-
+$params = Parse-Args $args -supports_check_mode $true
Function Get-CustomFacts {
[cmdletBinding()]
@@ -127,7 +125,6 @@ Set-Attr $result.ansible_facts "ansible_owner_contact" ([string] $win32_cs.Prima
Set-Attr $result.ansible_facts "ansible_user_dir" $env:userprofile
Set-Attr $result.ansible_facts "ansible_user_gecos" "" # Win32_UserAccount.FullName is probably the right thing here, but it can be expensive to get on large domains
Set-Attr $result.ansible_facts "ansible_user_id" $env:username
-Set-Attr $result.ansible_facts "ansible_user_uid" ([int] $user.User.Value.Substring(42))
Set-Attr $result.ansible_facts "ansible_user_sid" $user.User.Value
$date = New-Object psobject
diff --git a/windows/win_command.ps1 b/windows/win_command.ps1
new file mode 100644
index 00000000000..316654b81d7
--- /dev/null
+++ b/windows/win_command.ps1
@@ -0,0 +1,161 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+# TODO: add check mode support
+
+Set-StrictMode -Version 2
+$ErrorActionPreference = "Stop"
+
+$parsed_args = Parse-Args $args $false
+
+$raw_command_line = $(Get-AnsibleParam $parsed_args "_raw_params" -failifempty $true).Trim()
+$chdir = Get-AnsibleParam $parsed_args "chdir"
+$creates = Get-AnsibleParam $parsed_args "creates"
+$removes = Get-AnsibleParam $parsed_args "removes"
+
+$result = @{changed=$true; warnings=@(); cmd=$raw_command_line}
+
+If($creates -and $(Test-Path $creates)) {
+ Exit-Json @{cmd=$raw_command_line; msg="skipped, since $creates exists"; changed=$false; skipped=$true; rc=0}
+}
+
+If($removes -and -not $(Test-Path $removes)) {
+ Exit-Json @{cmd=$raw_command_line; msg="skipped, since $removes does not exist"; changed=$false; skipped=$true; rc=0}
+}
+
+$util_def = @'
+using System;
+using System.ComponentModel;
+using System.Diagnostics;
+using System.IO;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Threading;
+
+namespace Ansible.Command
+{
+ public static class NativeUtil
+ {
+ [DllImport("shell32.dll", SetLastError = true)]
+ static extern IntPtr CommandLineToArgvW([MarshalAs(UnmanagedType.LPWStr)] string lpCmdLine, out int pNumArgs);
+
+ public static string[] ParseCommandLine(string cmdline)
+ {
+ int numArgs;
+ IntPtr ret = CommandLineToArgvW(cmdline, out numArgs);
+
+ if (ret == IntPtr.Zero)
+ throw new Exception(String.Format("Error parsing command line: {0}", new Win32Exception(Marshal.GetLastWin32Error()).Message));
+
+ IntPtr[] strptrs = new IntPtr[numArgs];
+ Marshal.Copy(ret, strptrs, 0, numArgs);
+ string[] cmdlineParts = strptrs.Select(s=>Marshal.PtrToStringUni(s)).ToArray();
+
+ Marshal.FreeHGlobal(ret);
+
+ return cmdlineParts;
+ }
+
+ public static void GetProcessOutput(StreamReader stdoutStream, StreamReader stderrStream, out string stdout, out string stderr)
+ {
+ var sowait = new EventWaitHandle(false, EventResetMode.ManualReset);
+ var sewait = new EventWaitHandle(false, EventResetMode.ManualReset);
+
+ string so = null, se = null;
+
+ ThreadPool.QueueUserWorkItem((s)=>
+ {
+ so = stdoutStream.ReadToEnd();
+ sowait.Set();
+ });
+
+ ThreadPool.QueueUserWorkItem((s) =>
+ {
+ se = stderrStream.ReadToEnd();
+ sewait.Set();
+ });
+
+ foreach(var wh in new WaitHandle[] { sowait, sewait })
+ wh.WaitOne();
+
+ stdout = so;
+ stderr = se;
+ }
+ }
+}
+'@
+
+$util_type = Add-Type -TypeDefinition $util_def
+
+# FUTURE: extract this code to separate module_utils as Windows module API version of run_command
+
+$exec_args = $null
+
+# Parse the command-line with the Win32 parser to get the application name to run. The Win32 parser
+# will deal with quoting/escaping for us...
+# FUTURE: no longer necessary once we switch to raw Win32 CreateProcess
+$parsed_command_line = [Ansible.Command.NativeUtil]::ParseCommandLine($raw_command_line);
+$exec_application = $parsed_command_line[0]
+If($parsed_command_line.Length -gt 1) {
+ # lop the application off, then rejoin the args as a single string
+ $exec_args = $parsed_command_line[1..$($parsed_command_line.Length-1)] -join " "
+}
+
+$proc = New-Object System.Diagnostics.Process
+$psi = $proc.StartInfo
+$psi.FileName = $exec_application
+$psi.Arguments = $exec_args
+$psi.RedirectStandardOutput = $true
+$psi.RedirectStandardError = $true
+$psi.UseShellExecute = $false
+
+If ($chdir) {
+ $psi.WorkingDirectory = $chdir
+}
+
+$start_datetime = [DateTime]::UtcNow
+
+Try {
+ $proc.Start() | Out-Null # will always return $true for non shell-exec cases
+}
+Catch [System.ComponentModel.Win32Exception] {
+ # fail nicely for "normal" error conditions
+ # FUTURE: this probably won't work on Nano Server
+ $excep = $_
+ Exit-Json @{failed=$true;changed=$false;cmd=$raw_command_line;rc=$excep.Exception.NativeErrorCode;msg=$excep.Exception.Message}
+}
+
+$stdout = $stderr = [string] $null
+
+[Ansible.Command.NativeUtil]::GetProcessOutput($proc.StandardOutput, $proc.StandardError, [ref] $stdout, [ref] $stderr) | Out-Null
+
+$result.stdout = $stdout
+$result.stderr = $stderr
+
+$proc.WaitForExit() | Out-Null
+
+$result.rc = $proc.ExitCode
+
+$end_datetime = [DateTime]::UtcNow
+
+$result.start = $start_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff")
+$result.end = $end_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff")
+$result.delta = $($end_datetime - $start_datetime).ToString("h\:mm\:ss\.ffffff")
+
+Exit-Json $result
diff --git a/windows/win_command.py b/windows/win_command.py
new file mode 100644
index 00000000000..2a131c4bc45
--- /dev/null
+++ b/windows/win_command.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Ansible, inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: win_command
+short_description: Executes a command on a remote Windows node
+version_added: 2.2
+description:
+ - The M(win_command) module takes the command name followed by a list of space-delimited arguments.
+ - The given command will be executed on all selected nodes. It will not be
+ processed through the shell, so variables like C($env:HOME) and operations
+ like C("<"), C(">"), C("|"), and C(";") will not work (use the M(win_shell)
+ module if you need these features).
+options:
+ free_form:
+ description:
+ - the win_command module takes a free form command to run. There is no parameter actually named 'free form'.
+ See the examples!
+ required: true
+ creates:
+ description:
+ - a path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
+ removes:
+ description:
+ - a path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
+ chdir:
+ description:
+ - set the specified path as the current working directory before executing a command
+notes:
+ - If you want to run a command through a shell (say you are using C(<),
+ C(>), C(|), etc), you actually want the M(win_shell) module instead. The
+ M(win_command) module is much more secure as it's not affected by the user's
+ environment.
+ - " C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not exist, use this."
+author:
+ - Matt Davis
+'''
+
+EXAMPLES = '''
+# Example from Ansible Playbooks.
+- win_command: whoami
+ register: whoami_out
+
+# Run the command only if the specified file does not exist.
+- win_command: wbadmin -backupTarget:c:\\backup\\ creates=c:\\backup\\
+
+# You can also use the 'args' form to provide the options. This command
+# will change the working directory to c:\\somedir\\ and will only run when
+# c:\\backup\\ doesn't exist.
+- win_command: wbadmin -backupTarget:c:\\backup\\ creates=c:\\backup\\
+ args:
+ chdir: c:\\somedir\\
+ creates: c:\\backup\\
+'''
+
+RETURN = '''
+msg:
+ description: changed
+ returned: always
+ type: boolean
+ sample: True
+start:
+ description: The command execution start time
+ returned: always
+ type: string
+ sample: '2016-02-25 09:18:26.429568'
+end:
+ description: The command execution end time
+ returned: always
+ type: string
+ sample: '2016-02-25 09:18:26.755339'
+delta:
+ description: The command execution delta time
+ returned: always
+ type: string
+ sample: '0:00:00.325771'
+stdout:
+ description: The command standard output
+ returned: always
+ type: string
+ sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
+stderr:
+ description: The command standard error
+ returned: always
+ type: string
+ sample: 'ls: cannot access foo: No such file or directory'
+cmd:
+ description: The command executed by the task
+ returned: always
+ type: string
+ sample: 'rabbitmqctl join_cluster rabbit@master'
+rc:
+ description: The command return code (0 means success)
+ returned: always
+ type: int
+ sample: 0
+stdout_lines:
+ description: The command standard output split in lines
+ returned: always
+ type: list of strings
+ sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
+'''
diff --git a/windows/win_copy.py b/windows/win_copy.py
index 4f65d6827c7..1b81f0cac89 100755
--- a/windows/win_copy.py
+++ b/windows/win_copy.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_copy
@@ -47,25 +51,22 @@
'''
EXAMPLES = '''
-# Copy a single file
-- win_copy: src=/srv/myfiles/foo.conf dest=c:\\TEMP\\foo.conf
-
-# Copy the contents of files/temp_files dir into c:\temp\. Includes any sub dirs under files/temp_files
-# Note the use of unix style path in the dest.
-# This is necessary because \ is yaml escape sequence
-- win_copy: src=files/temp_files/ dest=c:/temp/
-
-# Copy the files/temp_files dir and any files or sub dirs into c:\temp
-# Copies the folder because there is no trailing / on 'files/temp_files'
-- win_copy: src=files/temp_files dest=c:/temp/
+- name: Copy a single file
+ win_copy:
+ src: /srv/myfiles/foo.conf
+ dest: c:\TEMP\foo.conf
+- name: Copy files/temp_files to c:\temp
+ win_copy:
+ src: files/temp_files/
+ dest: c:\temp
'''
RETURN = '''
dest:
description: destination file/path
returned: changed
type: string
- sample: "c:/temp/"
+ sample: 'c:\temp'
src:
description: source file used for the copy on the target machine
returned: changed
diff --git a/windows/win_feature.py b/windows/win_feature.py
index 04226c609a8..2fb6fe3718b 100644
--- a/windows/win_feature.py
+++ b/windows/win_feature.py
@@ -21,13 +21,17 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_feature
version_added: "1.7"
-short_description: Installs and uninstalls Windows Features
+short_description: Installs and uninstalls Windows Features on Windows Server
description:
- - Installs or uninstalls Windows Roles or Features
+ - Installs or uninstalls Windows Roles or Features on Windows Server. This module uses the Add/Remove-WindowsFeature Cmdlets, which is not available on client os machines.
options:
name:
description:
diff --git a/windows/win_file.ps1 b/windows/win_file.ps1
index 958f9f04fcc..e064c5c6dbd 100644
--- a/windows/win_file.ps1
+++ b/windows/win_file.ps1
@@ -102,7 +102,7 @@ Else
If ( $state -eq "directory" )
{
- New-Item -ItemType directory -Path $path
+ New-Item -ItemType directory -Path $path | Out-Null
$result.changed = $TRUE
}
diff --git a/windows/win_file.py b/windows/win_file.py
index 895da567d86..82c5510c3cf 100644
--- a/windows/win_file.py
+++ b/windows/win_file.py
@@ -19,6 +19,10 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_file
@@ -46,27 +50,37 @@
If C(file), the file will NOT be created if it does not exist, see the M(copy)
or M(template) module if you want that behavior. If C(absent),
directories will be recursively deleted, and files will be removed.
- If C(touch), an empty file will be created if the c(path) does not
+ If C(touch), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
- modification times (similar to the way `touch` works from the command line).
+ modification times (similar to the way C(touch) works from the command line).
required: false
default: file
choices: [ file, directory, touch, absent ]
'''
EXAMPLES = '''
-# create a file
-- win_file: path=C:\\temp\\foo.conf
+- name: Create a file
+ win_file:
+ path: C:\temp\foo.conf
+ state: file
-# touch a file (creates if not present, updates modification time if present)
-- win_file: path=C:\\temp\\foo.conf state=touch
+- name: Touch a file (creates if not present, updates modification time if present)
+ win_file:
+ path: C:\temp\foo.conf
+ state: touch
-# remove a file, if present
-- win_file: path=C:\\temp\\foo.conf state=absent
+- name: Remove a file, if present
+ win_file:
+ path: C:\temp\foo.conf
+ state: absent
-# create directory structure
-- win_file: path=C:\\temp\\folder\\subfolder state=directory
+- name: Create directory structure
+ win_file:
+ path: C:\temp\folder\subfolder
+ state: directory
-# remove directory structure
-- win_file: path=C:\\temp state=absent
+- name: Remove directory structure
+ win_file:
+ path: C:\temp
+ state: absent
'''
diff --git a/windows/win_get_url.py b/windows/win_get_url.py
index 041eb563d00..6c5d5c67c0d 100644
--- a/windows/win_get_url.py
+++ b/windows/win_get_url.py
@@ -21,6 +21,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_get_url
@@ -45,9 +49,9 @@
default: null
force:
description:
- - If C(yes), will always download the file. If C(no), will only
+ - If C(yes), will always download the file. If C(no), will only
download the file if it does not exist or the remote file has been
- modified more recently than the local file. This works by sending
+ modified more recently than the local file. This works by sending
an http HEAD request to retrieve last modified time of the requested
resource, so for this to work, the remote web server must support
HEAD requests.
@@ -95,29 +99,29 @@
# Playbook example
- name: Download earthrise.jpg to 'C:\\Users\\RandomUser\\earthrise.jpg'
win_get_url:
- url: 'http://www.example.com/earthrise.jpg'
- dest: 'C:\\Users\\RandomUser\\earthrise.jpg'
+ url: http://www.example.com/earthrise.jpg
+ dest: C:\Users\RandomUser\earthrise.jpg
-- name: Download earthrise.jpg to 'C:\\Users\\RandomUser\\earthrise.jpg' only if modified
+- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' only if modified
win_get_url:
- url: 'http://www.example.com/earthrise.jpg'
- dest: 'C:\\Users\\RandomUser\\earthrise.jpg'
+ url: http://www.example.com/earthrise.jpg
+ dest: C:\Users\RandomUser\earthrise.jpg
force: no
-- name: Download earthrise.jpg to 'C:\\Users\\RandomUser\\earthrise.jpg' through a proxy server.
+- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' through a proxy server.
win_get_url:
- url: 'http://www.example.com/earthrise.jpg'
- dest: 'C:\\Users\\RandomUser\\earthrise.jpg'
- proxy_url: 'http://10.0.0.1:8080'
- proxy_username: 'username'
- proxy_password: 'password'
+ url: http://www.example.com/earthrise.jpg
+ dest: C:\Users\RandomUser\earthrise.jpg
+ proxy_url: http://10.0.0.1:8080
+ proxy_username: username
+ proxy_password: password
'''
RETURN = '''
url:
description: requested url
returned: always
type: string
- sample: 'http://www.example.com/earthrise.jpg'
+ sample: http://www.example.com/earthrise.jpg
dest:
description: destination file/path
returned: always
diff --git a/windows/win_group.py b/windows/win_group.py
index 5e8b0adaaf2..035c05eff5c 100644
--- a/windows/win_group.py
+++ b/windows/win_group.py
@@ -21,6 +21,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_group
@@ -54,13 +58,13 @@
'''
EXAMPLES = '''
- # Create a new group
+- name: Create a new group
win_group:
name: deploy
description: Deploy Group
state: present
- # Remove a group
+- name: Remove a group
win_group:
name: deploy
state: absent
diff --git a/windows/win_lineinfile.py b/windows/win_lineinfile.py
index c6761591619..df250d6d414 100644
--- a/windows/win_lineinfile.py
+++ b/windows/win_lineinfile.py
@@ -10,11 +10,15 @@
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
DOCUMENTATION = """
---
@@ -31,11 +35,11 @@
aliases: [ name, destfile ]
description:
- The path of the file to modify.
- - Note that the Windows path delimiter '\' must be escaped as '\\' (see examples below)
+ - Note that the Windows path delimiter C(\) must be escaped as C(\\) when the line is double quoted.
regexp:
required: false
description:
- - "The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions; see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx)."
+ - "The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions; see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx)."
state:
required: false
choices: [ present, absent ]
@@ -58,13 +62,13 @@
default: EOF
description:
- Used with C(state=present). If specified, the line will be inserted after the last match of specified regular expression. A special value is available; C(EOF) for inserting the line at the end of the file.
- - If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs).
+ - If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs).
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
description:
- Used with C(state=present). If specified, the line will be inserted before the last match of specified regular expression. A value is available; C(BOF) for inserting the line at the beginning of the file.
- - If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs).
+ - If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs).
choices: [ 'BOF', '*regex*' ]
create:
required: false
@@ -81,7 +85,7 @@
validate:
required: false
description:
- - Validation to run before copying into place. Use %s in the command to indicate the current file to validate.
+ - Validation to run before copying into place. Use %s in the command to indicate the current file to validate.
- The command is passed securely so shell features like expansion and pipes won't work.
default: None
encoding:
@@ -94,26 +98,49 @@
newline:
required: false
description:
- - "Specifies the line separator style to use for the modified file. This defaults to the windows line separator (\r\n). Note that the indicated line separator will be used for file output regardless of the original line seperator that appears in the input file."
+ - "Specifies the line separator style to use for the modified file. This defaults to the windows line separator (C(\r\n)). Note that the indicated line separator will be used for file output regardless of the original line separator that appears in the input file."
choices: [ "windows", "unix" ]
default: "windows"
"""
-EXAMPLES = """
-- win_lineinfile: dest=C:\\temp\\example.conf regexp=^name= line="name=JohnDoe"
+EXAMPLES = r"""
+- win_lineinfile:
+ dest: C:\temp\example.conf
+ regexp: '^name='
+ line: 'name=JohnDoe'
-- win_lineinfile: dest=C:\\temp\\example.conf state=absent regexp="^name="
+- win_lineinfile:
+ dest: C:\temp\example.conf
+ regexp: '^name='
+ state: absent
-- win_lineinfile: dest=C:\\temp\\example.conf regexp='^127\.0\.0\.1' line='127.0.0.1 localhost'
+- win_lineinfile:
+ dest: C:\temp\example.conf
+ regexp: '^127\.0\.0\.1'
+ line: '127.0.0.1 localhost'
-- win_lineinfile: dest=C:\\temp\\httpd.conf regexp="^Listen " insertafter="^#Listen " line="Listen 8080"
+- win_lineinfile:
+ dest: C:\temp\httpd.conf
+ regexp: '^Listen '
+ insertafter: '^#Listen '
+ line: Listen 8080
-- win_lineinfile: dest=C:\\temp\\services regexp="^# port for http" insertbefore="^www.*80/tcp" line="# port for http by default"
+- win_lineinfile:
+ dest: C:\temp\services
+ regexp: '^# port for http'
+ insertbefore: '^www.*80/tcp'
+ line: '# port for http by default'
-# Create file if it doesnt exist with a specific encoding
-- win_lineinfile: dest=C:\\temp\\utf16.txt create="yes" encoding="utf-16" line="This is a utf-16 encoded file"
+# Create file if it doesn't exist with a specific encoding
+- win_lineinfile:
+ dest: C:\temp\utf16.txt
+ create: yes
+ encoding: utf-16
+ line: This is a utf-16 encoded file
# Add a line to a file and ensure the resulting file uses unix line separators
-- win_lineinfile: dest=C:\\temp\\testfile.txt line="Line added to file" newline="unix"
-
+- win_lineinfile:
+ dest: C:\temp\testfile.txt
+ line: Line added to file
+ newline: unix
"""
diff --git a/windows/win_msi.py b/windows/win_msi.py
index d426c164506..cfc7e089822 100644
--- a/windows/win_msi.py
+++ b/windows/win_msi.py
@@ -21,6 +21,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['deprecated'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_msi
@@ -55,21 +59,24 @@
- Specify whether to wait for install or uninstall to complete before continuing.
choices:
- true
- - yes
- false
- - no
default: false
author: "Matt Martz (@sivel)"
'''
EXAMPLES = '''
-# Install an MSI file
-- win_msi: path=C:\\\\7z920-x64.msi
+- name: Install an MSI file
+ win_msi:
+ path: C:\7z920-x64.msi
-# Install an MSI, and wait for it to complete before continuing
-- win_msi: path=C:\\\\7z920-x64.msi wait=true
+- name: Install an MSI, and wait for it to complete before continuing
+ win_msi:
+ path: C:\7z920-x64.msi
+ wait: true
-# Uninstall an MSI file
-- win_msi: path=C:\\\\7z920-x64.msi state=absent
+- name: Uninstall an MSI file
+ win_msi:
+ path: C:\7z920-x64.msi
+ state: absent
'''
diff --git a/windows/win_ping.py b/windows/win_ping.py
index ecb5149f8c3..6f650e977fd 100644
--- a/windows/win_ping.py
+++ b/windows/win_ping.py
@@ -21,6 +21,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_ping
diff --git a/windows/win_reboot.py b/windows/win_reboot.py
index eeb39a9279d..c8f179e7dd4 100644
--- a/windows/win_reboot.py
+++ b/windows/win_reboot.py
@@ -19,6 +19,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION='''
---
module: win_reboot
diff --git a/windows/win_service.py b/windows/win_service.py
index 1f0f6326e65..e4f009a361b 100644
--- a/windows/win_service.py
+++ b/windows/win_service.py
@@ -21,6 +21,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_service
@@ -59,12 +63,12 @@
'''
EXAMPLES = '''
- # Restart a service
+- name: Restart a service
win_service:
name: spooler
state: restarted
- # Set service startup mode to auto and ensure it is started
+- name: Set service startup mode to auto and ensure it is started
win_service:
name: spooler
start_mode: auto
diff --git a/windows/win_shell.ps1 b/windows/win_shell.ps1
new file mode 100644
index 00000000000..664858e52ca
--- /dev/null
+++ b/windows/win_shell.ps1
@@ -0,0 +1,142 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+# TODO: add check mode support
+
+Set-StrictMode -Version 2
+$ErrorActionPreference = "Stop"
+
+$helper_def = @"
+using System.Diagnostics;
+using System.IO;
+using System.Threading;
+
+namespace Ansible.Shell
+{
+ public class ProcessUtil
+ {
+ public static void GetProcessOutput(StreamReader stdoutStream, StreamReader stderrStream, out string stdout, out string stderr)
+ {
+ var sowait = new EventWaitHandle(false, EventResetMode.ManualReset);
+ var sewait = new EventWaitHandle(false, EventResetMode.ManualReset);
+
+ string so = null, se = null;
+
+ ThreadPool.QueueUserWorkItem((s)=>
+ {
+ so = stdoutStream.ReadToEnd();
+ sowait.Set();
+ });
+
+ ThreadPool.QueueUserWorkItem((s) =>
+ {
+ se = stderrStream.ReadToEnd();
+ sewait.Set();
+ });
+
+ foreach(var wh in new WaitHandle[] { sowait, sewait })
+ wh.WaitOne();
+
+ stdout = so;
+ stderr = se;
+ }
+ }
+}
+"@
+
+$parsed_args = Parse-Args $args $false
+
+$raw_command_line = $(Get-AnsibleParam $parsed_args "_raw_params" -failifempty $true).Trim()
+$chdir = Get-AnsibleParam $parsed_args "chdir"
+$executable = Get-AnsibleParam $parsed_args "executable"
+$creates = Get-AnsibleParam $parsed_args "creates"
+$removes = Get-AnsibleParam $parsed_args "removes"
+
+$result = @{changed=$true; warnings=@(); cmd=$raw_command_line}
+
+If($creates -and $(Test-Path $creates)) {
+ Exit-Json @{cmd=$raw_command_line; msg="skipped, since $creates exists"; changed=$false; skipped=$true; rc=0}
+}
+
+If($removes -and -not $(Test-Path $removes)) {
+ Exit-Json @{cmd=$raw_command_line; msg="skipped, since $removes does not exist"; changed=$false; skipped=$true; rc=0}
+}
+
+Add-Type -TypeDefinition $helper_def
+
+$exec_args = $null
+
+If(-not $executable -or $executable -eq "powershell") {
+ $exec_application = "powershell"
+
+ # Base64 encode the command so we don't have to worry about the various levels of escaping
+ $encoded_command = [Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($raw_command_line))
+
+ $exec_args = @("-noninteractive", "-encodedcommand", $encoded_command)
+}
+Else {
+ # FUTURE: support arg translation from executable (or executable_args?) to process arguments for arbitrary interpreter?
+ $exec_application = $executable
+ $exec_args = @("/c", $raw_command_line)
+}
+
+$proc = New-Object System.Diagnostics.Process
+$psi = $proc.StartInfo
+$psi.FileName = $exec_application
+$psi.Arguments = $exec_args
+$psi.RedirectStandardOutput = $true
+$psi.RedirectStandardError = $true
+$psi.UseShellExecute = $false
+
+If ($chdir) {
+ $psi.WorkingDirectory = $chdir
+}
+
+$start_datetime = [DateTime]::UtcNow
+
+Try {
+ $proc.Start() | Out-Null # will always return $true for non shell-exec cases
+}
+Catch [System.ComponentModel.Win32Exception] {
+ # fail nicely for "normal" error conditions
+ # FUTURE: this probably won't work on Nano Server
+ $excep = $_
+ Exit-Json @{failed=$true;changed=$false;cmd=$raw_command_line;rc=$excep.Exception.NativeErrorCode;msg=$excep.Exception.Message}
+}
+
+$stdout = $stderr = [string] $null
+
+[Ansible.Shell.ProcessUtil]::GetProcessOutput($proc.StandardOutput, $proc.StandardError, [ref] $stdout, [ref] $stderr) | Out-Null
+
+$result.stdout = $stdout
+$result.stderr = $stderr
+
+# TODO: decode CLIXML stderr output (and other streams?)
+
+$proc.WaitForExit() | Out-Null
+
+$result.rc = $proc.ExitCode
+
+$end_datetime = [DateTime]::UtcNow
+
+$result.start = $start_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff")
+$result.end = $end_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff")
+$result.delta = $($end_datetime - $start_datetime).ToString("h\:mm\:ss\.ffffff")
+
+Exit-Json $result
diff --git a/windows/win_shell.py b/windows/win_shell.py
new file mode 100644
index 00000000000..6441f2018fa
--- /dev/null
+++ b/windows/win_shell.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Ansible, inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: win_shell
+short_description: Execute shell commands on target hosts.
+version_added: 2.2
+description:
+ - The M(win_shell) module takes the command name followed by a list of space-delimited arguments.
+ It is similar to the M(win_command) module, but runs
+ the command via a shell (defaults to PowerShell) on the target host.
+options:
+ free_form:
+ description:
+      - The win_shell module takes a free form command to run. There is no parameter actually named 'free_form'.
+ See the examples!
+ required: true
+ creates:
+ description:
+ - a path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
+ removes:
+ description:
+ - a path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
+ chdir:
+ description:
+ - set the specified path as the current working directory before executing a command
+ executable:
+ description:
+      - change the shell used to execute the command (e.g. C(cmd)). The target shell must accept a C(/c) parameter followed by the raw command line to be executed.
+notes:
+ - If you want to run an executable securely and predictably, it may be
+ better to use the M(win_command) module instead. Best practices when writing
+ playbooks will follow the trend of using M(win_command) unless M(win_shell) is
+ explicitly required. When running ad-hoc commands, use your best judgement.
+ - WinRM will not return from a command execution until all child processes created have exited. Thus, it is not possible to use win_shell to spawn long-running child or background processes.
+ Consider creating a Windows service for managing background processes.
+author:
+ - Matt Davis
+'''
+
+EXAMPLES = '''
+# Execute a command in the remote shell; stdout goes to the specified
+# file on the remote.
+- win_shell: C:\\somescript.ps1 >> c:\\somelog.txt
+
+# Change the working directory to somedir/ before executing the command.
+- win_shell: C:\\somescript.ps1 >> c:\\somelog.txt chdir=c:\\somedir
+
+# You can also use the 'args' form to provide the options. This command
+# will change the working directory to c:\somedir and will only run when
+# c:\somelog.txt doesn't exist.
+- win_shell: C:\\somescript.ps1 >> c:\\somelog.txt
+ args:
+ chdir: c:\\somedir
+ creates: c:\\somelog.txt
+
+# Run a command under a non-Powershell interpreter (cmd in this case)
+- win_shell: echo %HOMEPATH%
+ args:
+ executable: cmd
+ register: homedir_out
+'''
+
+RETURN = '''
+msg:
+    description: Whether the module made a change on the target (always true when the command runs)
+ returned: always
+ type: boolean
+ sample: True
+start:
+ description: The command execution start time
+ returned: always
+ type: string
+ sample: '2016-02-25 09:18:26.429568'
+end:
+ description: The command execution end time
+ returned: always
+ type: string
+ sample: '2016-02-25 09:18:26.755339'
+delta:
+ description: The command execution delta time
+ returned: always
+ type: string
+ sample: '0:00:00.325771'
+stdout:
+ description: The command standard output
+ returned: always
+ type: string
+ sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
+stderr:
+ description: The command standard error
+ returned: always
+ type: string
+ sample: 'ls: cannot access foo: No such file or directory'
+cmd:
+ description: The command executed by the task
+ returned: always
+ type: string
+ sample: 'rabbitmqctl join_cluster rabbit@master'
+rc:
+ description: The command return code (0 means success)
+ returned: always
+ type: int
+ sample: 0
+stdout_lines:
+ description: The command standard output split in lines
+ returned: always
+ type: list of strings
+ sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
+'''
diff --git a/windows/win_stat.py b/windows/win_stat.py
index e26655985e2..823dde0d15e 100644
--- a/windows/win_stat.py
+++ b/windows/win_stat.py
@@ -17,6 +17,10 @@
# this is a windows documentation stub, actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_stat
@@ -50,11 +54,12 @@
'''
EXAMPLES = '''
-# Obtain information about a file
-
-- win_stat: path=C:\\foo.ini
+- name: Obtain information about a file
+ win_stat:
+ path: C:\foo.ini
register: file_info
-- debug: var=file_info
+- debug:
+ var: file_info
'''
diff --git a/windows/win_template.py b/windows/win_template.py
index c3a511cad48..f93307a0fcc 100644
--- a/windows/win_template.py
+++ b/windows/win_template.py
@@ -15,6 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = r'''
---
module: win_template
diff --git a/windows/win_user.ps1 b/windows/win_user.ps1
index 0ca11c743af..5eba6ad2a69 100644
--- a/windows/win_user.ps1
+++ b/windows/win_user.ps1
@@ -137,8 +137,16 @@ If ($state -eq 'present') {
[void][system.reflection.assembly]::LoadWithPartialName('System.DirectoryServices.AccountManagement')
$host_name = [System.Net.Dns]::GetHostName()
$pc = New-Object -TypeName System.DirectoryServices.AccountManagement.PrincipalContext 'Machine', $host_name
- # ValidateCredentials fails if PasswordExpired == 1
- If (!$pc.ValidateCredentials($username, $password)) {
+
+ # ValidateCredentials will fail if either of these are true- just force update...
+ If($user_obj.AccountDisabled -or $user_obj.PasswordExpired) {
+ $password_match = $false
+ }
+ Else {
+ $password_match = $pc.ValidateCredentials($username, $password)
+ }
+
+ If (-not $password_match) {
$user_obj.SetPassword($password)
$result.changed = $true
}
diff --git a/windows/win_user.py b/windows/win_user.py
index 376ff487fb3..3158208db6f 100644
--- a/windows/win_user.py
+++ b/windows/win_user.py
@@ -21,6 +21,10 @@
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'core',
+ 'version': '1.0'}
+
DOCUMENTATION = '''
---
module: win_user